GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/arm64/vmm/vmm_reset.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2018 Alexandru Elisei <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/hypervisor.h>

#include "arm64.h"
#include "reset.h"

/*
 * Make the architecturally UNKNOWN value 0. As a bonus, we don't have to
 * manually set all those RES0 fields.
 */
#define ARCH_UNKNOWN            0
#define set_arch_unknown(reg)   (memset(&(reg), ARCH_UNKNOWN, sizeof(reg)))
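/*
 * For example, set_arch_unknown(el2ctx->elr_el1) expands to
 * memset(&el2ctx->elr_el1, 0, sizeof(el2ctx->elr_el1)); the same macro also
 * covers array members in full.
 */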

void
reset_vm_el01_regs(void *vcpu)
{
        struct hypctx *el2ctx;

        el2ctx = vcpu;

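        /*
         * The trap frame holds the guest's general-purpose and exception
         * return state; reset it to the UNKNOWN (zero) value along with the
         * EL1 and EL0 system registers below.
         */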
        set_arch_unknown(el2ctx->tf);

        set_arch_unknown(el2ctx->actlr_el1);
        set_arch_unknown(el2ctx->afsr0_el1);
        set_arch_unknown(el2ctx->afsr1_el1);
        set_arch_unknown(el2ctx->amair_el1);
        set_arch_unknown(el2ctx->contextidr_el1);
        set_arch_unknown(el2ctx->cpacr_el1);
        set_arch_unknown(el2ctx->csselr_el1);
        set_arch_unknown(el2ctx->elr_el1);
        set_arch_unknown(el2ctx->esr_el1);
        set_arch_unknown(el2ctx->far_el1);
        set_arch_unknown(el2ctx->mair_el1);
        set_arch_unknown(el2ctx->mdccint_el1);
        set_arch_unknown(el2ctx->mdscr_el1);
        set_arch_unknown(el2ctx->par_el1);

        /*
         * Guest starts with:
         * ~SCTLR_M: MMU off
         * ~SCTLR_C: data cache off
         * SCTLR_CP15BEN: memory barrier instruction enable from EL0; RAO/WI
         * ~SCTLR_I: instruction cache off
         */
        el2ctx->sctlr_el1 = SCTLR_RES1;
        el2ctx->sctlr_el1 &= ~SCTLR_M & ~SCTLR_C & ~SCTLR_I;
        el2ctx->sctlr_el1 |= SCTLR_CP15BEN;
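        /*
         * Everything else in SCTLR_EL1 starts from the SCTLR_RES1 baseline:
         * the architecturally RES1 bits set and the remaining fields clear.
         */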

        set_arch_unknown(el2ctx->sp_el0);
        set_arch_unknown(el2ctx->tcr_el1);
        set_arch_unknown(el2ctx->tpidr_el0);
        set_arch_unknown(el2ctx->tpidr_el1);
        set_arch_unknown(el2ctx->tpidrro_el0);
        set_arch_unknown(el2ctx->ttbr0_el1);
        set_arch_unknown(el2ctx->ttbr1_el1);
        set_arch_unknown(el2ctx->vbar_el1);
        set_arch_unknown(el2ctx->spsr_el1);

        set_arch_unknown(el2ctx->dbgbcr_el1);
        set_arch_unknown(el2ctx->dbgbvr_el1);
        set_arch_unknown(el2ctx->dbgwcr_el1);
        set_arch_unknown(el2ctx->dbgwvr_el1);

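        /*
         * Performance Monitors: keep the host's event counter count
         * (PMCR_EL0.N), reset the remaining PMU state.
         */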
        el2ctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0) & PMCR_N_MASK;
        /* PMCR_LC is unknown when AArch32 is supported or RES1 otherwise */
        el2ctx->pmcr_el0 |= PMCR_LC;
        set_arch_unknown(el2ctx->pmccntr_el0);
        set_arch_unknown(el2ctx->pmccfiltr_el0);
        set_arch_unknown(el2ctx->pmuserenr_el0);
        set_arch_unknown(el2ctx->pmselr_el0);
        set_arch_unknown(el2ctx->pmxevcntr_el0);
        set_arch_unknown(el2ctx->pmcntenset_el0);
        set_arch_unknown(el2ctx->pmintenset_el1);
        set_arch_unknown(el2ctx->pmovsset_el0);
        memset(el2ctx->pmevcntr_el0, 0, sizeof(el2ctx->pmevcntr_el0));
        memset(el2ctx->pmevtyper_el0, 0, sizeof(el2ctx->pmevtyper_el0));
}

void
reset_vm_el2_regs(void *vcpu)
{
        struct hypctx *el2ctx;
        uint64_t cpu_aff, vcpuid;

        el2ctx = vcpu;
        vcpuid = vcpu_vcpuid(el2ctx->vcpu);

        /*
         * Set the Hypervisor Configuration Register:
         *
         * HCR_RW: use AArch64 for EL1
         * HCR_TID3: handle ID registers in the vmm to provide a common
         *    set of features on all vcpus
         * HCR_TWI: trap WFI to the hypervisor
         * HCR_BSU_IS: barrier instructions apply to the inner shareable
         *    domain
         * HCR_FB: broadcast maintenance operations
         * HCR_AMO: route physical SError interrupts to EL2
         * HCR_IMO: route physical IRQ interrupts to EL2
         * HCR_FMO: route physical FIQ interrupts to EL2
         * HCR_SWIO: turn set/way invalidate into set/way clean and
         *    invalidate
         * HCR_VM: use stage 2 translation
         */
        el2ctx->hcr_el2 = HCR_RW | HCR_TID3 | HCR_TWI | HCR_BSU_IS | HCR_FB |
            HCR_AMO | HCR_IMO | HCR_FMO | HCR_SWIO | HCR_VM;
        if (in_vhe()) {
                el2ctx->hcr_el2 |= HCR_E2H;
        }
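        /*
         * When the host uses VHE, HCR_EL2.E2H presumably has to stay set
         * while the guest runs as well, since clearing it would change the
         * layout and redirection of the EL2 registers the host relies on.
         */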

        /* Set the Extended Hypervisor Configuration Register */
        el2ctx->hcrx_el2 = 0;
        /* TODO: Trap all extensions we don't support */
        el2ctx->mdcr_el2 = MDCR_EL2_TDOSA | MDCR_EL2_TDRA | MDCR_EL2_TPMS |
            MDCR_EL2_TTRF;
        /* PMCR_EL0.N is read from MDCR_EL2.HPMN */
        el2ctx->mdcr_el2 |= (el2ctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT;
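        /*
         * HPMN occupies the low bits of MDCR_EL2, so the shifted PMCR_EL0.N
         * value can be OR-ed in directly.
         */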

        el2ctx->vmpidr_el2 = VMPIDR_EL2_RES1;
        /* The guest will detect a multi-core, single-threaded CPU */
        el2ctx->vmpidr_el2 &= ~VMPIDR_EL2_U & ~VMPIDR_EL2_MT;
        /*
         * Generate the guest MPIDR value. We only support 16 CPUs at affinity
         * level 0 to simplify the vgicv3 driver (see writing sgi1r_el1).
         */
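        /*
         * For example, vcpuid 21 (0x15) maps to Aff0 = 5, Aff1 = 1 and
         * Aff2 = Aff3 = 0, i.e. core 5 of cluster 1.
         */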
        cpu_aff = (vcpuid & 0xf) << MPIDR_AFF0_SHIFT |
            ((vcpuid >> 4) & 0xff) << MPIDR_AFF1_SHIFT |
            ((vcpuid >> 12) & 0xff) << MPIDR_AFF2_SHIFT |
            ((vcpuid >> 20) & 0xff) << MPIDR_AFF3_SHIFT;
        el2ctx->vmpidr_el2 |= cpu_aff;

        /*
         * Present a fixed CPU identification to the guest: an ARM Foundation
         * Model part number, variant and revision 0, and architecture 0xf
         * (features described by the ID registers).
         */
        el2ctx->vpidr_el2 = CPU_IMPL_TO_MIDR(CPU_IMPL_ARM);
        el2ctx->vpidr_el2 |= CPU_VAR_TO_MIDR(0);
        el2ctx->vpidr_el2 |= CPU_ARCH_TO_MIDR(0xf);
        el2ctx->vpidr_el2 |= CPU_PART_TO_MIDR(CPU_PART_FOUNDATION);
        el2ctx->vpidr_el2 |= CPU_REV_TO_MIDR(0);
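        /* Guest reads of MIDR_EL1 at EL1 return this VPIDR_EL2 value. */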

        /*
         * Don't trap accesses to CPACR_EL1, trace, SVE, Advanced SIMD
         * and floating point functionality to EL2.
         */
        if (in_vhe())
                el2ctx->cptr_el2 = CPTR_E2H_TRAP_ALL | CPTR_E2H_FPEN;
        else
                el2ctx->cptr_el2 = CPTR_TRAP_ALL & ~CPTR_TFP;
        el2ctx->cptr_el2 &= ~CPTR_TCPAC;
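        /*
         * CPTR_EL2 uses a different layout when HCR_EL2.E2H is set, which is
         * why the VHE and non-VHE cases need different encodings above.
         */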
        /*
         * Disable interrupts in the guest. The guest OS will re-enable
         * them.
         */
        el2ctx->tf.tf_spsr = PSR_D | PSR_A | PSR_I | PSR_F;
        /* Use the EL1 stack when taking exceptions to EL1 */
        el2ctx->tf.tf_spsr |= PSR_M_EL1h;
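        /*
         * tf_spsr is presumably restored into the guest's PSTATE on entry,
         * so the vcpu starts at EL1h with debug, SError, IRQ and FIQ masked.
         */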
}