GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/arm64/vmm/vmm_hyp.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Andrew Turner
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/proc.h>


#include "arm64.h"
#include "hyp.h"

struct hypctx;

uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);

static void
vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest,
    bool ecv_poff)
{
	uint64_t dfr0;

	if (guest) {
		/* Store the timer registers */
		hypctx->vtimer_cpu.cntkctl_el1 =
		    READ_SPECIALREG(EL1_REG(CNTKCTL));
		hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 =
		    READ_SPECIALREG(EL0_REG(CNTV_CVAL));
		hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
		    READ_SPECIALREG(EL0_REG(CNTV_CTL));
	}
	if (guest_or_nonvhe(guest) && ecv_poff) {
		/*
		 * If we have ECV then the guest could modify these registers.
		 * If VHE is enabled then the kernel will see a different view
		 * of the registers, so doesn't need to handle them.
		 */
		hypctx->vtimer_cpu.phys_timer.cntx_cval_el0 =
		    READ_SPECIALREG(EL0_REG(CNTP_CVAL));
		hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0 =
		    READ_SPECIALREG(EL0_REG(CNTP_CTL));
	}
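	/*
	 * A note on the EL1_REG()/EL0_REG() wrappers used above and below
	 * (an illustrative reading; the exact macros live in hyp.h): in the
	 * non-VHE build this code runs at EL2 with the host's EL1 context
	 * already saved, so the plain foo_el1/foo_el0 encodings name the
	 * guest's registers directly.  In the VHE build the host kernel is
	 * itself using foo_el1/foo_el0, so the guest copies are presumably
	 * reached through the FEAT_VHE foo_EL12/foo_EL02 aliases instead.
	 * Likewise, guest_or_nonvhe(guest) reads as "always true when not
	 * built for VHE, otherwise only when switching guest state".
	 */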

	if (guest) {
		/* Store the GICv3 registers */
		hypctx->vgic_v3_regs.ich_eisr_el2 =
		    READ_SPECIALREG(ich_eisr_el2);
		hypctx->vgic_v3_regs.ich_elrsr_el2 =
		    READ_SPECIALREG(ich_elrsr_el2);
		hypctx->vgic_v3_regs.ich_hcr_el2 =
		    READ_SPECIALREG(ich_hcr_el2);
		hypctx->vgic_v3_regs.ich_misr_el2 =
		    READ_SPECIALREG(ich_misr_el2);
		hypctx->vgic_v3_regs.ich_vmcr_el2 =
		    READ_SPECIALREG(ich_vmcr_el2);
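		/*
		 * The switch below (and the similar APR, breakpoint,
		 * watchpoint, and PMU switches later in this file) jumps to
		 * the case for the highest implemented register and then
		 * deliberately falls through every lower-numbered case, so
		 * each register is read with a plain MRS of its literal
		 * name.  For example, with ich_lr_num == 2 the expansion
		 * behaves like (fields abbreviated):
		 *
		 *	switch (1) {
		 *	case 1:
		 *		ich_lr_el2[1] = READ_SPECIALREG(ich_lr1_el2);
		 *		(falls through)
		 *	default:
		 *	case 0:
		 *		ich_lr_el2[0] = READ_SPECIALREG(ich_lr0_el2);
		 *	}
		 */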
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	STORE_LR(x)						\
	case x:							\
		hypctx->vgic_v3_regs.ich_lr_el2[x] =		\
		    READ_SPECIALREG(ich_lr ## x ##_el2)
		STORE_LR(15);
		STORE_LR(14);
		STORE_LR(13);
		STORE_LR(12);
		STORE_LR(11);
		STORE_LR(10);
		STORE_LR(9);
		STORE_LR(8);
		STORE_LR(7);
		STORE_LR(6);
		STORE_LR(5);
		STORE_LR(4);
		STORE_LR(3);
		STORE_LR(2);
		STORE_LR(1);
		default:
		STORE_LR(0);
#undef STORE_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	STORE_APR(x)						\
	case x:							\
		hypctx->vgic_v3_regs.ich_ap0r_el2[x] =		\
		    READ_SPECIALREG(ich_ap0r ## x ##_el2);	\
		hypctx->vgic_v3_regs.ich_ap1r_el2[x] =		\
		    READ_SPECIALREG(ich_ap1r ## x ##_el2)
		STORE_APR(3);
		STORE_APR(2);
		STORE_APR(1);
		default:
		STORE_APR(0);
#undef STORE_APR
		}
	}

	hypctx->dbgclaimset_el1 = READ_SPECIALREG(dbgclaimset_el1);

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_BRP(x)					\
	case x:							\
		hypctx->dbgbcr_el1[x] =				\
		    READ_SPECIALREG(dbgbcr ## x ## _el1);	\
		hypctx->dbgbvr_el1[x] =				\
		    READ_SPECIALREG(dbgbvr ## x ## _el1)
	STORE_DBG_BRP(15);
	STORE_DBG_BRP(14);
	STORE_DBG_BRP(13);
	STORE_DBG_BRP(12);
	STORE_DBG_BRP(11);
	STORE_DBG_BRP(10);
	STORE_DBG_BRP(9);
	STORE_DBG_BRP(8);
	STORE_DBG_BRP(7);
	STORE_DBG_BRP(6);
	STORE_DBG_BRP(5);
	STORE_DBG_BRP(4);
	STORE_DBG_BRP(3);
	STORE_DBG_BRP(2);
	STORE_DBG_BRP(1);
	default:
	STORE_DBG_BRP(0);
#undef STORE_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_WRP(x)					\
	case x:							\
		hypctx->dbgwcr_el1[x] =				\
		    READ_SPECIALREG(dbgwcr ## x ## _el1);	\
		hypctx->dbgwvr_el1[x] =				\
		    READ_SPECIALREG(dbgwvr ## x ## _el1)
	STORE_DBG_WRP(15);
	STORE_DBG_WRP(14);
	STORE_DBG_WRP(13);
	STORE_DBG_WRP(12);
	STORE_DBG_WRP(11);
	STORE_DBG_WRP(10);
	STORE_DBG_WRP(9);
	STORE_DBG_WRP(8);
	STORE_DBG_WRP(7);
	STORE_DBG_WRP(6);
	STORE_DBG_WRP(5);
	STORE_DBG_WRP(4);
	STORE_DBG_WRP(3);
	STORE_DBG_WRP(2);
	STORE_DBG_WRP(1);
	default:
	STORE_DBG_WRP(0);
#undef STORE_DBG_WRP
	}

	/* Store the PMU registers */
	hypctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0);
	hypctx->pmccntr_el0 = READ_SPECIALREG(pmccntr_el0);
	hypctx->pmccfiltr_el0 = READ_SPECIALREG(pmccfiltr_el0);
	hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
	hypctx->pmselr_el0 = READ_SPECIALREG(pmselr_el0);
	hypctx->pmxevcntr_el0 = READ_SPECIALREG(pmxevcntr_el0);
	hypctx->pmcntenset_el0 = READ_SPECIALREG(pmcntenset_el0);
	hypctx->pmintenset_el1 = READ_SPECIALREG(pmintenset_el1);
	hypctx->pmovsset_el0 = READ_SPECIALREG(pmovsset_el0);

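	/*
	 * PMCR_EL0.N is the number of implemented event counters; the cycle
	 * counter PMCCNTR_EL0 is separate and always present.  The switch
	 * below uses "case (x + 1)" so a CPU reporting N counters enters at
	 * STORE_PMU(N - 1) and falls through to STORE_PMU(0), saving
	 * PMEVCNTR<n>_EL0 and PMEVTYPER<n>_EL0 for n = N-1..0.  For example,
	 * N == 2 saves counters 1 and 0, while N == 0 matches the default
	 * case and saves nothing beyond the registers above.
	 */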
	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	STORE_PMU(x)						\
	case (x + 1):						\
		hypctx->pmevcntr_el0[x] =			\
		    READ_SPECIALREG(pmevcntr ## x ## _el0);	\
		hypctx->pmevtyper_el0[x] =			\
		    READ_SPECIALREG(pmevtyper ## x ## _el0)
	STORE_PMU(30);
	STORE_PMU(29);
	STORE_PMU(28);
	STORE_PMU(27);
	STORE_PMU(26);
	STORE_PMU(25);
	STORE_PMU(24);
	STORE_PMU(23);
	STORE_PMU(22);
	STORE_PMU(21);
	STORE_PMU(20);
	STORE_PMU(19);
	STORE_PMU(18);
	STORE_PMU(17);
	STORE_PMU(16);
	STORE_PMU(15);
	STORE_PMU(14);
	STORE_PMU(13);
	STORE_PMU(12);
	STORE_PMU(11);
	STORE_PMU(10);
	STORE_PMU(9);
	STORE_PMU(8);
	STORE_PMU(7);
	STORE_PMU(6);
	STORE_PMU(5);
	STORE_PMU(4);
	STORE_PMU(3);
	STORE_PMU(2);
	STORE_PMU(1);
	STORE_PMU(0);
	default:	/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef STORE_PMU
	}

	/* Store the special regs to the trapframe */
	hypctx->tf.tf_sp = READ_SPECIALREG(sp_el1);
	hypctx->tf.tf_elr = READ_SPECIALREG(elr_el2);
	hypctx->tf.tf_spsr = READ_SPECIALREG(spsr_el2);
	if (guest) {
		hypctx->tf.tf_esr = READ_SPECIALREG(esr_el2);
		hypctx->par_el1 = READ_SPECIALREG(par_el1);
	}

	/* Store the guest special registers */
	hypctx->sp_el0 = READ_SPECIALREG(sp_el0);
	hypctx->tpidr_el0 = READ_SPECIALREG(tpidr_el0);
	hypctx->tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
	hypctx->tpidr_el1 = READ_SPECIALREG(tpidr_el1);

	hypctx->actlr_el1 = READ_SPECIALREG(actlr_el1);
	hypctx->csselr_el1 = READ_SPECIALREG(csselr_el1);
	hypctx->mdccint_el1 = READ_SPECIALREG(mdccint_el1);
	hypctx->mdscr_el1 = READ_SPECIALREG(mdscr_el1);

	if (guest_or_nonvhe(guest)) {
		hypctx->elr_el1 = READ_SPECIALREG(EL1_REG(ELR));
		hypctx->vbar_el1 = READ_SPECIALREG(EL1_REG(VBAR));

		hypctx->afsr0_el1 = READ_SPECIALREG(EL1_REG(AFSR0));
		hypctx->afsr1_el1 = READ_SPECIALREG(EL1_REG(AFSR1));
		hypctx->amair_el1 = READ_SPECIALREG(EL1_REG(AMAIR));
		hypctx->contextidr_el1 = READ_SPECIALREG(EL1_REG(CONTEXTIDR));
		hypctx->cpacr_el1 = READ_SPECIALREG(EL1_REG(CPACR));
		hypctx->esr_el1 = READ_SPECIALREG(EL1_REG(ESR));
		hypctx->far_el1 = READ_SPECIALREG(EL1_REG(FAR));
		hypctx->mair_el1 = READ_SPECIALREG(EL1_REG(MAIR));
		hypctx->sctlr_el1 = READ_SPECIALREG(EL1_REG(SCTLR));
		hypctx->spsr_el1 = READ_SPECIALREG(EL1_REG(SPSR));
		hypctx->tcr_el1 = READ_SPECIALREG(EL1_REG(TCR));
		/* TODO: Support when this is not res0 */
		hypctx->tcr2_el1 = 0;
		hypctx->ttbr0_el1 = READ_SPECIALREG(EL1_REG(TTBR0));
		hypctx->ttbr1_el1 = READ_SPECIALREG(EL1_REG(TTBR1));
	}

	hypctx->cptr_el2 = READ_SPECIALREG(cptr_el2);
	hypctx->hcr_el2 = READ_SPECIALREG(hcr_el2);
	hypctx->vpidr_el2 = READ_SPECIALREG(vpidr_el2);
	hypctx->vmpidr_el2 = READ_SPECIALREG(vmpidr_el2);
}

static void
vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest,
    bool ecv_poff)
{
	uint64_t dfr0;

	/* Restore the special registers */
	WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);

	if (guest) {
		if ((hyp->feats & HYP_FEAT_HCX) != 0)
			WRITE_SPECIALREG(HCRX_EL2_REG, hypctx->hcrx_el2);
	}
	isb();

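	/*
	 * FEAT_FGT/FEAT_FGT2 fine-grained trap registers (HFGRTR_EL2,
	 * HFGWTR_EL2, HFGITR_EL2, HDFGWTR_EL2 and their *2 counterparts)
	 * choose which individual EL1/EL0 system register accesses and
	 * instructions trap to EL2.  They are only context switched in the
	 * VHE build below; in the non-VHE build these EL2 registers are
	 * assumed to be set up elsewhere and left alone here.
	 */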
#ifdef VMM_VHE
	if (guest) {
		/* Fine-grained trap controls */
		if ((hyp->feats & HYP_FEAT_FGT) != 0) {
			WRITE_SPECIALREG(HDFGWTR_EL2_REG, hypctx->hdfgwtr_el2);
			WRITE_SPECIALREG(HFGITR_EL2_REG, hypctx->hfgitr_el2);
			WRITE_SPECIALREG(HFGRTR_EL2_REG, hypctx->hfgrtr_el2);
			WRITE_SPECIALREG(HFGWTR_EL2_REG, hypctx->hfgwtr_el2);
		}

		if ((hyp->feats & HYP_FEAT_FGT2) != 0) {
			WRITE_SPECIALREG(HDFGRTR2_EL2_REG,
			    hypctx->hdfgrtr2_el2);
			WRITE_SPECIALREG(HDFGWTR2_EL2_REG,
			    hypctx->hdfgwtr2_el2);
			WRITE_SPECIALREG(HFGITR2_EL2_REG, hypctx->hfgitr2_el2);
			WRITE_SPECIALREG(HFGRTR2_EL2_REG, hypctx->hfgrtr2_el2);
			WRITE_SPECIALREG(HFGWTR2_EL2_REG, hypctx->hfgwtr2_el2);
		}
	}
#endif

	WRITE_SPECIALREG(sp_el0, hypctx->sp_el0);
	WRITE_SPECIALREG(tpidr_el0, hypctx->tpidr_el0);
	WRITE_SPECIALREG(tpidrro_el0, hypctx->tpidrro_el0);
	WRITE_SPECIALREG(tpidr_el1, hypctx->tpidr_el1);

	WRITE_SPECIALREG(actlr_el1, hypctx->actlr_el1);
	WRITE_SPECIALREG(csselr_el1, hypctx->csselr_el1);
	WRITE_SPECIALREG(mdccint_el1, hypctx->mdccint_el1);
	WRITE_SPECIALREG(mdscr_el1, hypctx->mdscr_el1);

	if (guest_or_nonvhe(guest)) {
		WRITE_SPECIALREG(EL1_REG(ELR), hypctx->elr_el1);
		WRITE_SPECIALREG(EL1_REG(VBAR), hypctx->vbar_el1);

		WRITE_SPECIALREG(EL1_REG(AFSR0), hypctx->afsr0_el1);
		WRITE_SPECIALREG(EL1_REG(AFSR1), hypctx->afsr1_el1);
		WRITE_SPECIALREG(EL1_REG(AMAIR), hypctx->amair_el1);
		WRITE_SPECIALREG(EL1_REG(CONTEXTIDR), hypctx->contextidr_el1);
		WRITE_SPECIALREG(EL1_REG(CPACR), hypctx->cpacr_el1);
		WRITE_SPECIALREG(EL1_REG(ESR), hypctx->esr_el1);
		WRITE_SPECIALREG(EL1_REG(FAR), hypctx->far_el1);
		WRITE_SPECIALREG(EL1_REG(MAIR), hypctx->mair_el1);

		WRITE_SPECIALREG(EL1_REG(SCTLR), hypctx->sctlr_el1);
		WRITE_SPECIALREG(EL1_REG(SPSR), hypctx->spsr_el1);
		WRITE_SPECIALREG(EL1_REG(TCR), hypctx->tcr_el1);
		/* TODO: tcr2_el1 */
		WRITE_SPECIALREG(EL1_REG(TTBR0), hypctx->ttbr0_el1);
		WRITE_SPECIALREG(EL1_REG(TTBR1), hypctx->ttbr1_el1);
	}

	if (guest) {
		WRITE_SPECIALREG(par_el1, hypctx->par_el1);
	}

	WRITE_SPECIALREG(cptr_el2, hypctx->cptr_el2);
	WRITE_SPECIALREG(vpidr_el2, hypctx->vpidr_el2);
	WRITE_SPECIALREG(vmpidr_el2, hypctx->vmpidr_el2);

	/* Load the special regs from the trapframe */
	WRITE_SPECIALREG(sp_el1, hypctx->tf.tf_sp);
	WRITE_SPECIALREG(elr_el2, hypctx->tf.tf_elr);
	WRITE_SPECIALREG(spsr_el2, hypctx->tf.tf_spsr);

	/* Restore the PMU registers */
	WRITE_SPECIALREG(pmcr_el0, hypctx->pmcr_el0);
	WRITE_SPECIALREG(pmccntr_el0, hypctx->pmccntr_el0);
	WRITE_SPECIALREG(pmccfiltr_el0, hypctx->pmccfiltr_el0);
	WRITE_SPECIALREG(pmuserenr_el0, hypctx->pmuserenr_el0);
	WRITE_SPECIALREG(pmselr_el0, hypctx->pmselr_el0);
	WRITE_SPECIALREG(pmxevcntr_el0, hypctx->pmxevcntr_el0);
	/* Clear all events/interrupts then enable them */
	WRITE_SPECIALREG(pmcntenclr_el0, ~0ul);
	WRITE_SPECIALREG(pmcntenset_el0, hypctx->pmcntenset_el0);
	WRITE_SPECIALREG(pmintenclr_el1, ~0ul);
	WRITE_SPECIALREG(pmintenset_el1, hypctx->pmintenset_el1);
	WRITE_SPECIALREG(pmovsclr_el0, ~0ul);
	WRITE_SPECIALREG(pmovsset_el0, hypctx->pmovsset_el0);

	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	LOAD_PMU(x)						\
	case (x + 1):						\
		WRITE_SPECIALREG(pmevcntr ## x ## _el0,		\
		    hypctx->pmevcntr_el0[x]);			\
		WRITE_SPECIALREG(pmevtyper ## x ## _el0,	\
		    hypctx->pmevtyper_el0[x])
	LOAD_PMU(30);
	LOAD_PMU(29);
	LOAD_PMU(28);
	LOAD_PMU(27);
	LOAD_PMU(26);
	LOAD_PMU(25);
	LOAD_PMU(24);
	LOAD_PMU(23);
	LOAD_PMU(22);
	LOAD_PMU(21);
	LOAD_PMU(20);
	LOAD_PMU(19);
	LOAD_PMU(18);
	LOAD_PMU(17);
	LOAD_PMU(16);
	LOAD_PMU(15);
	LOAD_PMU(14);
	LOAD_PMU(13);
	LOAD_PMU(12);
	LOAD_PMU(11);
	LOAD_PMU(10);
	LOAD_PMU(9);
	LOAD_PMU(8);
	LOAD_PMU(7);
	LOAD_PMU(6);
	LOAD_PMU(5);
	LOAD_PMU(4);
	LOAD_PMU(3);
	LOAD_PMU(2);
	LOAD_PMU(1);
	LOAD_PMU(0);
	default:	/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef LOAD_PMU
	}

	/* Clear the debug claim tags, then restore the saved set bits */
	WRITE_SPECIALREG(dbgclaimclr_el1, ~0ul);
	WRITE_SPECIALREG(dbgclaimset_el1, hypctx->dbgclaimset_el1);

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_BRP(x)						\
	case x:							\
		WRITE_SPECIALREG(dbgbcr ## x ## _el1,		\
		    hypctx->dbgbcr_el1[x]);			\
		WRITE_SPECIALREG(dbgbvr ## x ## _el1,		\
		    hypctx->dbgbvr_el1[x])
	LOAD_DBG_BRP(15);
	LOAD_DBG_BRP(14);
	LOAD_DBG_BRP(13);
	LOAD_DBG_BRP(12);
	LOAD_DBG_BRP(11);
	LOAD_DBG_BRP(10);
	LOAD_DBG_BRP(9);
	LOAD_DBG_BRP(8);
	LOAD_DBG_BRP(7);
	LOAD_DBG_BRP(6);
	LOAD_DBG_BRP(5);
	LOAD_DBG_BRP(4);
	LOAD_DBG_BRP(3);
	LOAD_DBG_BRP(2);
	LOAD_DBG_BRP(1);
	default:
	LOAD_DBG_BRP(0);
#undef LOAD_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_WRP(x)						\
	case x:							\
		WRITE_SPECIALREG(dbgwcr ## x ## _el1,		\
		    hypctx->dbgwcr_el1[x]);			\
		WRITE_SPECIALREG(dbgwvr ## x ## _el1,		\
		    hypctx->dbgwvr_el1[x])
	LOAD_DBG_WRP(15);
	LOAD_DBG_WRP(14);
	LOAD_DBG_WRP(13);
	LOAD_DBG_WRP(12);
	LOAD_DBG_WRP(11);
	LOAD_DBG_WRP(10);
	LOAD_DBG_WRP(9);
	LOAD_DBG_WRP(8);
	LOAD_DBG_WRP(7);
	LOAD_DBG_WRP(6);
	LOAD_DBG_WRP(5);
	LOAD_DBG_WRP(4);
	LOAD_DBG_WRP(3);
	LOAD_DBG_WRP(2);
	LOAD_DBG_WRP(1);
	default:
	LOAD_DBG_WRP(0);
#undef LOAD_DBG_WRP
	}

	if (guest) {
		/* Load the timer registers */
		WRITE_SPECIALREG(EL1_REG(CNTKCTL),
		    hypctx->vtimer_cpu.cntkctl_el1);
		WRITE_SPECIALREG(EL0_REG(CNTV_CVAL),
		    hypctx->vtimer_cpu.virt_timer.cntx_cval_el0);
		WRITE_SPECIALREG(EL0_REG(CNTV_CTL),
		    hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0);
		WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
		WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);

		if (ecv_poff) {
			/*
			 * Load the same offset as the virtual timer
			 * to keep in sync.
			 */
			WRITE_SPECIALREG(CNTPOFF_EL2_REG,
			    hyp->vtimer.cntvoff_el2);
			isb();
		}
	}
	if (guest_or_nonvhe(guest) && ecv_poff) {
		/*
		 * If we have ECV then the guest could modify these registers.
		 * If VHE is enabled then the kernel will see a different view
		 * of the registers, so doesn't need to handle them.
		 */
		WRITE_SPECIALREG(EL0_REG(CNTP_CVAL),
		    hypctx->vtimer_cpu.phys_timer.cntx_cval_el0);
		WRITE_SPECIALREG(EL0_REG(CNTP_CTL),
		    hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0);
	}

	if (guest) {
		/* Load the GICv3 registers */
		WRITE_SPECIALREG(ich_hcr_el2, hypctx->vgic_v3_regs.ich_hcr_el2);
		WRITE_SPECIALREG(ich_vmcr_el2,
		    hypctx->vgic_v3_regs.ich_vmcr_el2);
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	LOAD_LR(x)						\
	case x:							\
		WRITE_SPECIALREG(ich_lr ## x ##_el2,		\
		    hypctx->vgic_v3_regs.ich_lr_el2[x])
		LOAD_LR(15);
		LOAD_LR(14);
		LOAD_LR(13);
		LOAD_LR(12);
		LOAD_LR(11);
		LOAD_LR(10);
		LOAD_LR(9);
		LOAD_LR(8);
		LOAD_LR(7);
		LOAD_LR(6);
		LOAD_LR(5);
		LOAD_LR(4);
		LOAD_LR(3);
		LOAD_LR(2);
		LOAD_LR(1);
		default:
		LOAD_LR(0);
#undef LOAD_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	LOAD_APR(x)						\
	case x:							\
		WRITE_SPECIALREG(ich_ap0r ## x ##_el2,		\
		    hypctx->vgic_v3_regs.ich_ap0r_el2[x]);	\
		WRITE_SPECIALREG(ich_ap1r ## x ##_el2,		\
		    hypctx->vgic_v3_regs.ich_ap1r_el2[x])
		LOAD_APR(3);
		LOAD_APR(2);
		LOAD_APR(1);
		default:
		LOAD_APR(0);
#undef LOAD_APR
		}
	}
}

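/*
 * Perform one world switch: save the host register state into a stack-local
 * hypctx, load the guest state, point vttbr_el2 at the guest stage 2 tables,
 * run the guest via do_call_guest(), then save the guest state and exit
 * information and restore the host.  Returns the exception type that caused
 * the exit, or EXCP_TYPE_REENTER if the caller should immediately re-enter
 * the guest.
 */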
static uint64_t
vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
{
	struct hypctx host_hypctx;
	uint64_t cntvoff_el2;
	uint64_t ich_hcr_el2, ich_vmcr_el2, cnthctl_el2, cntkctl_el1;
#ifndef VMM_VHE
	uint64_t hcrx_el2;
#endif
	uint64_t ret;
	uint64_t s1e1r, hpfar_el2;
	bool ecv_poff, hpfar_valid;

	ecv_poff = (hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0;
	vmm_hyp_reg_store(&host_hypctx, NULL, false, ecv_poff);
#ifndef VMM_VHE
	if ((hyp->feats & HYP_FEAT_HCX) != 0)
		hcrx_el2 = READ_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2));
#endif

	/* Save the host special registers */
	cnthctl_el2 = READ_SPECIALREG(cnthctl_el2);
	cntkctl_el1 = READ_SPECIALREG(cntkctl_el1);
	cntvoff_el2 = READ_SPECIALREG(cntvoff_el2);

	ich_hcr_el2 = READ_SPECIALREG(ich_hcr_el2);
	ich_vmcr_el2 = READ_SPECIALREG(ich_vmcr_el2);

	vmm_hyp_reg_restore(hypctx, hyp, true, ecv_poff);

	/* Load the common hypervisor registers */
	WRITE_SPECIALREG(vttbr_el2, hyp->vttbr_el2);

	host_hypctx.mdcr_el2 = READ_SPECIALREG(mdcr_el2);
	WRITE_SPECIALREG(mdcr_el2, hypctx->mdcr_el2);

	/* Call into the guest */
	ret = VMM_HYP_FUNC(do_call_guest)(hypctx);

	WRITE_SPECIALREG(mdcr_el2, host_hypctx.mdcr_el2);
	isb();

	/* Store the exit info */
	hypctx->exit_info.far_el2 = READ_SPECIALREG(far_el2);
	vmm_hyp_reg_store(hypctx, hyp, true, ecv_poff);

	hpfar_valid = true;
	if (ret == EXCP_TYPE_EL1_SYNC) {
		switch (ESR_ELx_EXCEPTION(hypctx->tf.tf_esr)) {
		case EXCP_INSN_ABORT_L:
		case EXCP_DATA_ABORT_L:
			/*
			 * The hpfar_el2 register is valid for:
			 *  - Translation and Access faults.
			 *  - Translation, Access, and permission faults on
			 *    the translation table walk on the stage 1 tables.
			 *  - A stage 2 Address size fault.
			 *
			 * As we only need it in the first 2 cases we can just
			 * exclude it on permission faults that are not from
			 * the stage 1 table walk.
			 *
			 * TODO: Add a case for Arm erratum 834220.
			 */
			if ((hypctx->tf.tf_esr & ISS_DATA_S1PTW) != 0)
				break;
			switch (hypctx->tf.tf_esr & ISS_DATA_DFSC_MASK) {
			case ISS_DATA_DFSC_PF_L1:
			case ISS_DATA_DFSC_PF_L2:
			case ISS_DATA_DFSC_PF_L3:
				hpfar_valid = false;
				break;
			}
			break;
		}
	}
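	/*
	 * When hpfar_el2 cannot be trusted the faulting IPA is recomputed in
	 * software: an AT S1E1R walk of the faulting VA leaves the resulting
	 * address in PAR_EL1, and that address is shifted into the position
	 * of the HPFAR_EL2 FIPA field so the rest of vmm can treat both
	 * paths the same.  Roughly:
	 *
	 *	pa = (par_el1 & PAR_PA_MASK) >> PAR_PA_SHIFT;
	 *	hpfar_el2 = pa << HPFAR_EL2_FIPA_SHIFT;
	 *
	 * If the walk fails the guest is re-entered, presumably so the
	 * fault can be taken again.
	 */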
	if (hpfar_valid) {
		hypctx->exit_info.hpfar_el2 = READ_SPECIALREG(hpfar_el2);
	} else {
		/*
		 * TODO: There is a risk the at instruction could cause an
		 * exception here. We should handle it & return a failure.
		 */
		s1e1r =
		    arm64_address_translate_s1e1r(hypctx->exit_info.far_el2);
		if (PAR_SUCCESS(s1e1r)) {
			hpfar_el2 = (s1e1r & PAR_PA_MASK) >> PAR_PA_SHIFT;
			hpfar_el2 <<= HPFAR_EL2_FIPA_SHIFT;
			hypctx->exit_info.hpfar_el2 = hpfar_el2;
		} else {
			ret = EXCP_TYPE_REENTER;
		}
	}

	vmm_hyp_reg_restore(&host_hypctx, NULL, false, ecv_poff);

#ifndef VMM_VHE
	if ((hyp->feats & HYP_FEAT_HCX) != 0)
		WRITE_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2), hcrx_el2);
#endif

	/* Restore the host special registers */
	WRITE_SPECIALREG(ich_hcr_el2, ich_hcr_el2);
	WRITE_SPECIALREG(ich_vmcr_el2, ich_vmcr_el2);

	WRITE_SPECIALREG(cnthctl_el2, cnthctl_el2);
	WRITE_SPECIALREG(cntkctl_el1, cntkctl_el1);
	WRITE_SPECIALREG(cntvoff_el2, cntvoff_el2);

	return (ret);
}

VMM_STATIC uint64_t
VMM_HYP_FUNC(enter_guest)(struct hyp *hyp, struct hypctx *hypctx)
{
	uint64_t ret;

	do {
		ret = vmm_hyp_call_guest(hyp, hypctx);
	} while (ret == EXCP_TYPE_REENTER);

	return (ret);
}

VMM_STATIC uint64_t
VMM_HYP_FUNC(read_reg)(uint64_t reg)
{
	switch (reg) {
	case HYP_REG_ICH_VTR:
		return (READ_SPECIALREG(ich_vtr_el2));
	}

	return (0);
}

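/*
 * Invalidate all EL1&0 regime TLB entries (both stages, for all VMIDs)
 * across the Inner Shareable domain.  The dsb(ishst) makes any earlier
 * page table updates visible to the table walker before the invalidation;
 * the dsb(ish) waits for the broadcast TLBI to complete on all CPUs.
 */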
VMM_STATIC void
VMM_HYP_FUNC(clean_s2_tlbi)(void)
{
	dsb(ishst);
	__asm __volatile("tlbi alle1is");
	dsb(ish);
}

VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_range)(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
    bool final_only)
{
	uint64_t end, r, start;
	uint64_t host_vttbr;
#ifdef VMM_VHE
	uint64_t host_hcr;
#endif

#ifdef VMM_VHE
	dsb(ishst);
#endif

#define	TLBI_VA_SHIFT			12
#define	TLBI_VA_MASK			((1ul << 44) - 1)
#define	TLBI_VA(addr)			(((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
#define	TLBI_VA_L3_INCR			(L3_SIZE >> TLBI_VA_SHIFT)

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

#ifdef VMM_VHE
	/* Clear HCR_EL2.TGE so the EL1&0 regime is the target of the TLBI */
	host_hcr = READ_SPECIALREG(hcr_el2);
	WRITE_SPECIALREG(hcr_el2, host_hcr & ~HCR_TGE);
	isb();
#endif

	/*
	 * The CPU can cache the stage 1 + 2 combination so we need to ensure
	 * the stage 2 is invalidated first, then when this has completed we
	 * invalidate the stage 1 TLB. As we don't know which stage 1 virtual
	 * addresses point at the stage 2 IPA we need to invalidate the entire
	 * stage 1 TLB.
	 */

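	/*
	 * The Xt operand of tlbi ipas2e1is/ipas2le1is carries the target IPA
	 * shifted right by 12, i.e. in 4KiB page units, which is what the
	 * TLBI_VA() macro above produces.  The loop below therefore steps by
	 * one L3 (4KiB) page per iteration; e.g. an IPA range of
	 * [0x80000000, 0x80003000) is issued as operands 0x80000, 0x80001
	 * and 0x80002.
	 */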
	start = TLBI_VA(sva);
	end = TLBI_VA(eva);
	for (r = start; r < end; r += TLBI_VA_L3_INCR) {
		/* Invalidate the stage 2 TLB entry */
		if (final_only)
			__asm __volatile("tlbi ipas2le1is, %0" : : "r"(r));
		else
			__asm __volatile("tlbi ipas2e1is, %0" : : "r"(r));
	}
	/* Ensure the entry has been invalidated */
	dsb(ish);
	/* Invalidate the stage 1 TLB. */
	__asm __volatile("tlbi vmalle1is");
	dsb(ish);
	isb();

#ifdef VMM_VHE
	WRITE_SPECIALREG(hcr_el2, host_hcr);
	isb();
#endif

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();
}

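/*
 * tlbi vmalls12e1is only invalidates stage 1 and stage 2 entries tagged
 * with the current VMID, which is why the guest's vttbr_el2 (and with it
 * the guest's VMID) is installed before the invalidation and the host
 * value restored afterwards.
 */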
VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_all)(uint64_t vttbr)
{
	uint64_t host_vttbr;

#ifdef VMM_VHE
	dsb(ishst);
#endif

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

	__asm __volatile("tlbi vmalls12e1is");
	dsb(ish);
	isb();

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();
}