GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/arm64/vmm/io/vgic_v3.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2018 Alexandru Elisei <[email protected]>
 * Copyright (C) 2020-2022 Andrew Turner
 * Copyright (C) 2023 Arm Ltd
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/ofw/openfirm.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/cpu.h>
#include <machine/machdep.h>
#include <machine/param.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/intr.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <arm/arm/gic_common.h>
#include <arm64/arm64/gic_v3_reg.h>
#include <arm64/arm64/gic_v3_var.h>

#include <arm64/vmm/hyp.h>
#include <arm64/vmm/mmu.h>
#include <arm64/vmm/arm64.h>
#include <arm64/vmm/vmm_handlers.h>

#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_vm.h>

#include "vgic.h"
#include "vgic_v3.h"
#include "vgic_v3_reg.h"

#include "vgic_if.h"

#define VGIC_SGI_NUM    (GIC_LAST_SGI - GIC_FIRST_SGI + 1)
#define VGIC_PPI_NUM    (GIC_LAST_PPI - GIC_FIRST_PPI + 1)
#define VGIC_SPI_NUM    (GIC_LAST_SPI - GIC_FIRST_SPI + 1)
#define VGIC_PRV_I_NUM  (VGIC_SGI_NUM + VGIC_PPI_NUM)
#define VGIC_SHR_I_NUM  (VGIC_SPI_NUM)
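
/*
 * Illustrative note, not part of the original source: with the standard
 * INTID layout from gic_common.h (SGIs 0-15, PPIs 16-31, SPIs from 32 up),
 * VGIC_PRV_I_NUM works out to 32 private, per-vCPU interrupts, while the
 * SPIs are shared across the VM. A hypothetical compile-time check would be:
 *
 *	_Static_assert(VGIC_SGI_NUM + VGIC_PPI_NUM == 32,
 *	    "SGIs and PPIs are the 32 banked, per-CPU INTIDs");
 */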

MALLOC_DEFINE(M_VGIC_V3, "ARM VMM VGIC V3", "ARM VMM VGIC V3");

/* TODO: Move to softc */
struct vgic_v3_virt_features {
        uint8_t min_prio;
        size_t ich_lr_num;
        size_t ich_apr_num;
};

struct vgic_v3_irq {
        /* List of IRQs that are active or pending */
        TAILQ_ENTRY(vgic_v3_irq) act_pend_list;
        struct mtx irq_spinmtx;
        uint64_t mpidr;
        int target_vcpu;
        uint32_t irq;
        bool active;
        bool pending;
        bool enabled;
        bool level;
        bool on_aplist;
        uint8_t priority;
        uint8_t config;
#define VGIC_CONFIG_MASK        0x2
#define VGIC_CONFIG_LEVEL       0x0
#define VGIC_CONFIG_EDGE        0x2
};

/* Global data not needed by EL2 */
struct vgic_v3 {
        struct mtx dist_mtx;
        uint64_t dist_start;
        size_t dist_end;

        uint64_t redist_start;
        size_t redist_end;

        uint32_t gicd_ctlr;     /* Distributor Control Register */

        struct vgic_v3_irq *irqs;
};

/* Per-CPU data not needed by EL2 */
struct vgic_v3_cpu {
        /*
         * We need a mutex for accessing the list registers because they are
         * modified asynchronously by the virtual timer.
         *
         * Note that the mutex *MUST* be a spin mutex because an interrupt can
         * be injected by a callout callback function, thereby modifying the
         * list registers from a context where sleeping is forbidden.
         */
        struct mtx lr_mtx;

        struct vgic_v3_irq private_irqs[VGIC_PRV_I_NUM];
        TAILQ_HEAD(, vgic_v3_irq) irq_act_pend;
        u_int ich_lr_used;
};

/* How many IRQs we support (SGIs + PPIs + SPIs). Not including LPIs */
#define VGIC_NIRQS      1023
/* Pretend to be an Arm design */
#define VGIC_IIDR       0x43b

static vgic_inject_irq_t vgic_v3_inject_irq;
static vgic_inject_msi_t vgic_v3_inject_msi;

static int vgic_v3_max_cpu_count(device_t dev, struct hyp *hyp);

#define INJECT_IRQ(hyp, vcpuid, irqid, level)                   \
    vgic_v3_inject_irq(NULL, (hyp), (vcpuid), (irqid), (level))

typedef void (register_read)(struct hypctx *, u_int, uint64_t *, void *);
typedef void (register_write)(struct hypctx *, u_int, u_int, u_int,
    uint64_t, void *);

#define VGIC_8_BIT      (1 << 0)
/* (1 << 1) is reserved for 16 bit accesses */
#define VGIC_32_BIT     (1 << 2)
#define VGIC_64_BIT     (1 << 3)

struct vgic_register {
        u_int start;    /* Start within a memory region */
        u_int end;
        u_int size;
        u_int flags;
        register_read *read;
        register_write *write;
};

#define VGIC_REGISTER_RANGE(reg_start, reg_end, reg_size, reg_flags, readf, \
    writef)                                                             \
{                                                                       \
        .start = (reg_start),                                           \
        .end = (reg_end),                                               \
        .size = (reg_size),                                             \
        .flags = (reg_flags),                                           \
        .read = (readf),                                                \
        .write = (writef),                                              \
}

#define VGIC_REGISTER_RANGE_RAZ_WI(reg_start, reg_end, reg_size, reg_flags) \
        VGIC_REGISTER_RANGE(reg_start, reg_end, reg_size, reg_flags,    \
            gic_zero_read, gic_ignore_write)

#define VGIC_REGISTER(start_addr, reg_size, reg_flags, readf, writef)   \
        VGIC_REGISTER_RANGE(start_addr, (start_addr) + (reg_size),      \
            reg_size, reg_flags, readf, writef)

#define VGIC_REGISTER_RAZ_WI(start_addr, reg_size, reg_flags)           \
        VGIC_REGISTER_RANGE_RAZ_WI(start_addr,                          \
            (start_addr) + (reg_size), reg_size, reg_flags)
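
/*
 * Illustrative expansion, not in the original source: the GICD_CTLR entry
 * in dist_registers below,
 *
 *	VGIC_REGISTER(GICD_CTLR, 4, VGIC_32_BIT, dist_ctlr_read,
 *	    dist_ctlr_write),
 *
 * describes the 4-byte window [GICD_CTLR, GICD_CTLR + 4) that accepts only
 * 32-bit accesses and is backed by the named read and write handlers.
 */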

static register_read gic_pidr2_read;
static register_read gic_zero_read;
static register_write gic_ignore_write;

/* GICD_CTLR */
static register_read dist_ctlr_read;
static register_write dist_ctlr_write;
/* GICD_TYPER */
static register_read dist_typer_read;
/* GICD_IIDR */
static register_read dist_iidr_read;
/* GICD_STATUSR - RAZ/WI as we don't report errors (yet) */
/* GICD_SETSPI_NSR & GICD_CLRSPI_NSR */
static register_write dist_setclrspi_nsr_write;
/* GICD_SETSPI_SR - RAZ/WI */
/* GICD_CLRSPI_SR - RAZ/WI */
/* GICD_IGROUPR - RAZ/WI as GICD_CTLR.ARE == 1 */
/* GICD_ISENABLER */
static register_read dist_isenabler_read;
static register_write dist_isenabler_write;
/* GICD_ICENABLER */
static register_read dist_icenabler_read;
static register_write dist_icenabler_write;
/* GICD_ISPENDR */
static register_read dist_ispendr_read;
static register_write dist_ispendr_write;
/* GICD_ICPENDR */
static register_read dist_icpendr_read;
static register_write dist_icpendr_write;
/* GICD_ISACTIVER */
static register_read dist_isactiver_read;
static register_write dist_isactiver_write;
/* GICD_ICACTIVER */
static register_read dist_icactiver_read;
static register_write dist_icactiver_write;
/* GICD_IPRIORITYR */
static register_read dist_ipriorityr_read;
static register_write dist_ipriorityr_write;
/* GICD_ITARGETSR - RAZ/WI as GICD_CTLR.ARE == 1 */
/* GICD_ICFGR */
static register_read dist_icfgr_read;
static register_write dist_icfgr_write;
/* GICD_IGRPMODR - RAZ/WI from non-secure mode */
/* GICD_NSACR - RAZ/WI from non-secure mode */
/* GICD_SGIR - RAZ/WI as GICD_CTLR.ARE == 1 */
/* GICD_CPENDSGIR - RAZ/WI as GICD_CTLR.ARE == 1 */
/* GICD_SPENDSGIR - RAZ/WI as GICD_CTLR.ARE == 1 */
/* GICD_IROUTER */
static register_read dist_irouter_read;
static register_write dist_irouter_write;

static struct vgic_register dist_registers[] = {
        VGIC_REGISTER(GICD_CTLR, 4, VGIC_32_BIT, dist_ctlr_read,
            dist_ctlr_write),
        VGIC_REGISTER(GICD_TYPER, 4, VGIC_32_BIT, dist_typer_read,
            gic_ignore_write),
        VGIC_REGISTER(GICD_IIDR, 4, VGIC_32_BIT, dist_iidr_read,
            gic_ignore_write),
        VGIC_REGISTER_RAZ_WI(GICD_STATUSR, 4, VGIC_32_BIT),
        VGIC_REGISTER(GICD_SETSPI_NSR, 4, VGIC_32_BIT, gic_zero_read,
            dist_setclrspi_nsr_write),
        VGIC_REGISTER(GICD_CLRSPI_NSR, 4, VGIC_32_BIT, gic_zero_read,
            dist_setclrspi_nsr_write),
        VGIC_REGISTER_RAZ_WI(GICD_SETSPI_SR, 4, VGIC_32_BIT),
        VGIC_REGISTER_RAZ_WI(GICD_CLRSPI_SR, 4, VGIC_32_BIT),
        VGIC_REGISTER_RANGE_RAZ_WI(GICD_IGROUPR(0), GICD_IGROUPR(1024), 4,
            VGIC_32_BIT),

        VGIC_REGISTER_RAZ_WI(GICD_ISENABLER(0), 4, VGIC_32_BIT),
        VGIC_REGISTER_RANGE(GICD_ISENABLER(32), GICD_ISENABLER(1024), 4,
            VGIC_32_BIT, dist_isenabler_read, dist_isenabler_write),

        VGIC_REGISTER_RAZ_WI(GICD_ICENABLER(0), 4, VGIC_32_BIT),
        VGIC_REGISTER_RANGE(GICD_ICENABLER(32), GICD_ICENABLER(1024), 4,
            VGIC_32_BIT, dist_icenabler_read, dist_icenabler_write),

        VGIC_REGISTER_RAZ_WI(GICD_ISPENDR(0), 4, VGIC_32_BIT),
        VGIC_REGISTER_RANGE(GICD_ISPENDR(32), GICD_ISPENDR(1024), 4,
            VGIC_32_BIT, dist_ispendr_read, dist_ispendr_write),

        VGIC_REGISTER_RAZ_WI(GICD_ICPENDR(0), 4, VGIC_32_BIT),
        VGIC_REGISTER_RANGE(GICD_ICPENDR(32), GICD_ICPENDR(1024), 4,
            VGIC_32_BIT, dist_icpendr_read, dist_icpendr_write),

        VGIC_REGISTER_RAZ_WI(GICD_ISACTIVER(0), 4, VGIC_32_BIT),
        VGIC_REGISTER_RANGE(GICD_ISACTIVER(32), GICD_ISACTIVER(1024), 4,
            VGIC_32_BIT, dist_isactiver_read, dist_isactiver_write),

        VGIC_REGISTER_RAZ_WI(GICD_ICACTIVER(0), 4, VGIC_32_BIT),
        VGIC_REGISTER_RANGE(GICD_ICACTIVER(32), GICD_ICACTIVER(1024), 4,
            VGIC_32_BIT, dist_icactiver_read, dist_icactiver_write),

        VGIC_REGISTER_RANGE_RAZ_WI(GICD_IPRIORITYR(0), GICD_IPRIORITYR(32), 4,
            VGIC_32_BIT | VGIC_8_BIT),
        VGIC_REGISTER_RANGE(GICD_IPRIORITYR(32), GICD_IPRIORITYR(1024), 4,
            VGIC_32_BIT | VGIC_8_BIT, dist_ipriorityr_read,
            dist_ipriorityr_write),

        VGIC_REGISTER_RANGE_RAZ_WI(GICD_ITARGETSR(0), GICD_ITARGETSR(1024), 4,
            VGIC_32_BIT | VGIC_8_BIT),

        VGIC_REGISTER_RANGE_RAZ_WI(GICD_ICFGR(0), GICD_ICFGR(32), 4,
            VGIC_32_BIT),
        VGIC_REGISTER_RANGE(GICD_ICFGR(32), GICD_ICFGR(1024), 4,
            VGIC_32_BIT, dist_icfgr_read, dist_icfgr_write),
/*
        VGIC_REGISTER_RANGE(GICD_IGRPMODR(0), GICD_IGRPMODR(1024), 4,
            VGIC_32_BIT, dist_igrpmodr_read, dist_igrpmodr_write),
        VGIC_REGISTER_RANGE(GICD_NSACR(0), GICD_NSACR(1024), 4,
            VGIC_32_BIT, dist_nsacr_read, dist_nsacr_write),
*/
        VGIC_REGISTER_RAZ_WI(GICD_SGIR, 4, VGIC_32_BIT),
/*
        VGIC_REGISTER_RANGE(GICD_CPENDSGIR(0), GICD_CPENDSGIR(1024), 4,
            VGIC_32_BIT | VGIC_8_BIT, dist_cpendsgir_read,
            dist_cpendsgir_write),
        VGIC_REGISTER_RANGE(GICD_SPENDSGIR(0), GICD_SPENDSGIR(1024), 4,
            VGIC_32_BIT | VGIC_8_BIT, dist_spendsgir_read,
            dist_spendsgir_write),
*/
        VGIC_REGISTER_RANGE(GICD_IROUTER(32), GICD_IROUTER(1024), 8,
            VGIC_64_BIT | VGIC_32_BIT, dist_irouter_read, dist_irouter_write),

        VGIC_REGISTER_RANGE_RAZ_WI(GICD_PIDR4, GICD_PIDR2, 4, VGIC_32_BIT),
        VGIC_REGISTER(GICD_PIDR2, 4, VGIC_32_BIT, gic_pidr2_read,
            gic_ignore_write),
        VGIC_REGISTER_RANGE_RAZ_WI(GICD_PIDR2 + 4, GICD_SIZE, 4, VGIC_32_BIT),
};

/* GICR_CTLR - Ignore writes as no bits can be set */
static register_read redist_ctlr_read;
/* GICR_IIDR */
static register_read redist_iidr_read;
/* GICR_TYPER */
static register_read redist_typer_read;
/* GICR_STATUSR - RAZ/WI as we don't report errors (yet) */
/* GICR_WAKER - RAZ/WI from non-secure mode */
/* GICR_SETLPIR - RAZ/WI as no LPIs are supported */
/* GICR_CLRLPIR - RAZ/WI as no LPIs are supported */
/* GICR_PROPBASER - RAZ/WI as no LPIs are supported */
/* GICR_PENDBASER - RAZ/WI as no LPIs are supported */
/* GICR_INVLPIR - RAZ/WI as no LPIs are supported */
/* GICR_INVALLR - RAZ/WI as no LPIs are supported */
/* GICR_SYNCR - RAZ/WI as no LPIs are supported */

static struct vgic_register redist_rd_registers[] = {
        VGIC_REGISTER(GICR_CTLR, 4, VGIC_32_BIT, redist_ctlr_read,
            gic_ignore_write),
        VGIC_REGISTER(GICR_IIDR, 4, VGIC_32_BIT, redist_iidr_read,
            gic_ignore_write),
        VGIC_REGISTER(GICR_TYPER, 8, VGIC_64_BIT | VGIC_32_BIT,
            redist_typer_read, gic_ignore_write),
        VGIC_REGISTER_RAZ_WI(GICR_STATUSR, 4, VGIC_32_BIT),
        VGIC_REGISTER_RAZ_WI(GICR_WAKER, 4, VGIC_32_BIT),
        VGIC_REGISTER_RAZ_WI(GICR_SETLPIR, 8, VGIC_64_BIT | VGIC_32_BIT),
        VGIC_REGISTER_RAZ_WI(GICR_CLRLPIR, 8, VGIC_64_BIT | VGIC_32_BIT),
        VGIC_REGISTER_RAZ_WI(GICR_PROPBASER, 8, VGIC_64_BIT | VGIC_32_BIT),
        VGIC_REGISTER_RAZ_WI(GICR_PENDBASER, 8, VGIC_64_BIT | VGIC_32_BIT),
        VGIC_REGISTER_RAZ_WI(GICR_INVLPIR, 8, VGIC_64_BIT | VGIC_32_BIT),
        VGIC_REGISTER_RAZ_WI(GICR_INVALLR, 8, VGIC_64_BIT | VGIC_32_BIT),
        VGIC_REGISTER_RAZ_WI(GICR_SYNCR, 4, VGIC_32_BIT),

        /* These are identical to the dist registers */
        VGIC_REGISTER_RANGE_RAZ_WI(GICD_PIDR4, GICD_PIDR2, 4, VGIC_32_BIT),
        VGIC_REGISTER(GICD_PIDR2, 4, VGIC_32_BIT, gic_pidr2_read,
            gic_ignore_write),
        VGIC_REGISTER_RANGE_RAZ_WI(GICD_PIDR2 + 4, GICD_SIZE, 4,
            VGIC_32_BIT),
};

/* GICR_IGROUPR0 - RAZ/WI from non-secure mode */
/* GICR_ISENABLER0 */
static register_read redist_ienabler0_read;
static register_write redist_isenabler0_write;
/* GICR_ICENABLER0 */
static register_write redist_icenabler0_write;
/* GICR_ISPENDR0 */
static register_read redist_ipendr0_read;
static register_write redist_ispendr0_write;
/* GICR_ICPENDR0 */
static register_write redist_icpendr0_write;
/* GICR_ISACTIVER0 */
static register_read redist_iactiver0_read;
static register_write redist_isactiver0_write;
/* GICR_ICACTIVER0 */
static register_write redist_icactiver0_write;
/* GICR_IPRIORITYR */
static register_read redist_ipriorityr_read;
static register_write redist_ipriorityr_write;
/* GICR_ICFGR0 - RAZ/WI from non-secure mode */
/* GICR_ICFGR1 */
static register_read redist_icfgr1_read;
static register_write redist_icfgr1_write;
/* GICR_IGRPMODR0 - RAZ/WI from non-secure mode */
/* GICR_NSACR - RAZ/WI from non-secure mode */

static struct vgic_register redist_sgi_registers[] = {
        VGIC_REGISTER_RAZ_WI(GICR_IGROUPR0, 4, VGIC_32_BIT),
        VGIC_REGISTER(GICR_ISENABLER0, 4, VGIC_32_BIT, redist_ienabler0_read,
            redist_isenabler0_write),
        VGIC_REGISTER(GICR_ICENABLER0, 4, VGIC_32_BIT, redist_ienabler0_read,
            redist_icenabler0_write),
        VGIC_REGISTER(GICR_ISPENDR0, 4, VGIC_32_BIT, redist_ipendr0_read,
            redist_ispendr0_write),
        VGIC_REGISTER(GICR_ICPENDR0, 4, VGIC_32_BIT, redist_ipendr0_read,
            redist_icpendr0_write),
        VGIC_REGISTER(GICR_ISACTIVER0, 4, VGIC_32_BIT, redist_iactiver0_read,
            redist_isactiver0_write),
        VGIC_REGISTER(GICR_ICACTIVER0, 4, VGIC_32_BIT, redist_iactiver0_read,
            redist_icactiver0_write),
        VGIC_REGISTER_RANGE(GICR_IPRIORITYR(0), GICR_IPRIORITYR(32), 4,
            VGIC_32_BIT | VGIC_8_BIT, redist_ipriorityr_read,
            redist_ipriorityr_write),
        VGIC_REGISTER_RAZ_WI(GICR_ICFGR0, 4, VGIC_32_BIT),
        VGIC_REGISTER(GICR_ICFGR1, 4, VGIC_32_BIT, redist_icfgr1_read,
            redist_icfgr1_write),
        VGIC_REGISTER_RAZ_WI(GICR_IGRPMODR0, 4, VGIC_32_BIT),
        VGIC_REGISTER_RAZ_WI(GICR_NSACR, 4, VGIC_32_BIT),
};

static struct vgic_v3_virt_features virt_features;

static struct vgic_v3_irq *vgic_v3_get_irq(struct hyp *, int, uint32_t);
static void vgic_v3_release_irq(struct vgic_v3_irq *);

/* TODO: Move to a common file */
static int
mpidr_to_vcpu(struct hyp *hyp, uint64_t mpidr)
{
        struct vm *vm;
        struct hypctx *hypctx;

        vm = hyp->vm;
        for (int i = 0; i < vm_get_maxcpus(vm); i++) {
                hypctx = hyp->ctx[i];
                if (hypctx != NULL && (hypctx->vmpidr_el2 & GICD_AFF) == mpidr)
                        return (i);
        }
        return (-1);
}
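
/*
 * Illustrative example, not from the original source: an SPI routed to the
 * MPIDR affinity Aff3.Aff2.Aff1.Aff0 = 0.0.1.2 resolves to whichever vCPU
 * was created with VMPIDR_EL2 carrying those affinity fields; if no vCPU
 * matches, mpidr_to_vcpu() returns -1 and the route stays unresolved.
 */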

static void
vgic_v3_vminit(device_t dev, struct hyp *hyp)
{
        struct vgic_v3 *vgic;

        hyp->vgic = malloc(sizeof(*hyp->vgic), M_VGIC_V3,
            M_WAITOK | M_ZERO);
        vgic = hyp->vgic;

        /*
         * Configure the Distributor control register. The register resets
         * to an architecturally UNKNOWN value, so we reset to 0 to disable
         * all functionality controlled by the register.
         *
         * The exception is GICD_CTLR.DS, which is RAO/WI when the
         * Distributor supports one security state (ARM GIC Architecture
         * Specification for GICv3 and GICv4, p. 4-464).
         */
        vgic->gicd_ctlr = 0;

        mtx_init(&vgic->dist_mtx, "VGICv3 Distributor lock", NULL,
            MTX_SPIN);
}

static void
vgic_v3_cpuinit(device_t dev, struct hypctx *hypctx)
{
        struct vgic_v3_cpu *vgic_cpu;
        struct vgic_v3_irq *irq;
        int i, irqid;

        hypctx->vgic_cpu = malloc(sizeof(*hypctx->vgic_cpu),
            M_VGIC_V3, M_WAITOK | M_ZERO);
        vgic_cpu = hypctx->vgic_cpu;

        mtx_init(&vgic_cpu->lr_mtx, "VGICv3 ICH_LR_EL2 lock", NULL, MTX_SPIN);

        /* Set the SGI and PPI state */
        for (irqid = 0; irqid < VGIC_PRV_I_NUM; irqid++) {
                irq = &vgic_cpu->private_irqs[irqid];

                mtx_init(&irq->irq_spinmtx, "VGIC IRQ spinlock", NULL,
                    MTX_SPIN);
                irq->irq = irqid;
                irq->mpidr = hypctx->vmpidr_el2 & GICD_AFF;
                irq->target_vcpu = vcpu_vcpuid(hypctx->vcpu);
                MPASS(irq->target_vcpu >= 0);

                if (irqid < VGIC_SGI_NUM) {
                        /* SGIs */
                        irq->enabled = true;
                        irq->config = VGIC_CONFIG_EDGE;
                } else {
                        /* PPIs */
                        irq->config = VGIC_CONFIG_LEVEL;
                }
                irq->priority = 0;
        }

        /*
         * Configure the Interrupt Controller Hyp Control Register.
         *
         * ICH_HCR_EL2_En: enable the virtual CPU interface.
         *
         * Maintenance interrupts are disabled.
         */
        hypctx->vgic_v3_regs.ich_hcr_el2 = ICH_HCR_EL2_En;

        /*
         * Configure the Interrupt Controller Virtual Machine Control
         * Register.
         *
         * ICH_VMCR_EL2_VPMR: lowest priority mask for the VCPU interface
         * ICH_VMCR_EL2_VBPR1_NO_PREEMPTION: disable interrupt preemption for
         * Group 1 interrupts
         * ICH_VMCR_EL2_VBPR0_NO_PREEMPTION: disable interrupt preemption for
         * Group 0 interrupts
         * ~ICH_VMCR_EL2_VEOIM: writes to EOI registers perform priority drop
         * and interrupt deactivation.
         * ICH_VMCR_EL2_VENG0: virtual Group 0 interrupts enabled.
         * ICH_VMCR_EL2_VENG1: virtual Group 1 interrupts enabled.
         */
        hypctx->vgic_v3_regs.ich_vmcr_el2 =
            (virt_features.min_prio << ICH_VMCR_EL2_VPMR_SHIFT) |
            ICH_VMCR_EL2_VBPR1_NO_PREEMPTION |
            ICH_VMCR_EL2_VBPR0_NO_PREEMPTION;
        hypctx->vgic_v3_regs.ich_vmcr_el2 &= ~ICH_VMCR_EL2_VEOIM;
        hypctx->vgic_v3_regs.ich_vmcr_el2 |= ICH_VMCR_EL2_VENG0 |
            ICH_VMCR_EL2_VENG1;

        hypctx->vgic_v3_regs.ich_lr_num = virt_features.ich_lr_num;
        for (i = 0; i < hypctx->vgic_v3_regs.ich_lr_num; i++)
                hypctx->vgic_v3_regs.ich_lr_el2[i] = 0UL;
        vgic_cpu->ich_lr_used = 0;
        TAILQ_INIT(&vgic_cpu->irq_act_pend);

        hypctx->vgic_v3_regs.ich_apr_num = virt_features.ich_apr_num;
}

static void
vgic_v3_cpucleanup(device_t dev, struct hypctx *hypctx)
{
        struct vgic_v3_cpu *vgic_cpu;
        struct vgic_v3_irq *irq;
        int irqid;

        vgic_cpu = hypctx->vgic_cpu;
        for (irqid = 0; irqid < VGIC_PRV_I_NUM; irqid++) {
                irq = &vgic_cpu->private_irqs[irqid];
                mtx_destroy(&irq->irq_spinmtx);
        }

        mtx_destroy(&vgic_cpu->lr_mtx);
        free(hypctx->vgic_cpu, M_VGIC_V3);
}

static void
vgic_v3_vmcleanup(device_t dev, struct hyp *hyp)
{
        mtx_destroy(&hyp->vgic->dist_mtx);
        free(hyp->vgic, M_VGIC_V3);
}

static int
vgic_v3_max_cpu_count(device_t dev, struct hyp *hyp)
{
        struct vgic_v3 *vgic;
        size_t count;
        int16_t max_count;

        vgic = hyp->vgic;
        max_count = vm_get_maxcpus(hyp->vm);

        /* No registers, assume the maximum CPUs */
        if (vgic->redist_start == 0 && vgic->redist_end == 0)
                return (max_count);

        count = (vgic->redist_end - vgic->redist_start) /
            (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);

        /*
         * max_count is smaller than INT_MAX so will also limit count
         * to a positive integer value.
         */
        if (count > max_count)
                return (max_count);

        return (count);
}
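
/*
 * Worked example, illustrative and assuming the usual 64KiB RD and SGI
 * frames (GICR_RD_BASE_SIZE == GICR_SGI_BASE_SIZE == 0x10000): a 1MiB
 * redistributor window holds 0x100000 / 0x20000 == 8 redistributor frames,
 * so at most 8 vCPUs, further clamped by vm_get_maxcpus().
 */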

static bool
vgic_v3_irq_pending(struct vgic_v3_irq *irq)
{
        if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_LEVEL) {
                return (irq->pending || irq->level);
        } else {
                return (irq->pending);
        }
}

static bool
vgic_v3_queue_irq(struct hyp *hyp, struct vgic_v3_cpu *vgic_cpu,
    int vcpuid, struct vgic_v3_irq *irq)
{
        MPASS(vcpuid >= 0);
        MPASS(vcpuid < vm_get_maxcpus(hyp->vm));

        mtx_assert(&vgic_cpu->lr_mtx, MA_OWNED);
        mtx_assert(&irq->irq_spinmtx, MA_OWNED);

        /* No need to queue the IRQ */
        if (!irq->level && !irq->pending)
                return (false);

        if (!irq->on_aplist) {
                irq->on_aplist = true;
                TAILQ_INSERT_TAIL(&vgic_cpu->irq_act_pend, irq, act_pend_list);
        }
        return (true);
}

static uint64_t
gic_reg_value_64(uint64_t field, uint64_t val, u_int offset, u_int size)
{
        uint64_t mask;

        if (offset != 0 || size != 8) {
                mask = ((1ul << (size * 8)) - 1) << (offset * 8);
                /* Shift the new bits to the correct place */
                val <<= (offset * 8);
                /* Keep only the interesting bits */
                val &= mask;
                /* Add the bits we are keeping from the old value */
                val |= field & ~mask;
        }

        return (val);
}
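
/*
 * Worked example, illustrative and not in the original source: a 32-bit
 * write of 0x1 to the high half of a 64-bit register arrives as offset 4,
 * size 4. mask becomes 0xffffffff00000000, the new value is shifted into
 * the high word, and the low word of the old field is preserved:
 *
 *	gic_reg_value_64(0x0000000000010203, 0x1, 4, 4)
 *	    == 0x0000000100010203
 */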

static void
gic_pidr2_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
        *rval = GICR_PIDR2_ARCH_GICv3 << GICR_PIDR2_ARCH_SHIFT;
}

/* Common read-only/write-ignored helpers */
static void
gic_zero_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
        *rval = 0;
}

static void
gic_ignore_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
        /* Nothing to do */
}

static uint64_t
read_enabler(struct hypctx *hypctx, int n)
{
        struct vgic_v3_irq *irq;
        uint64_t ret;
        uint32_t irq_base;
        int i;

        ret = 0;
        irq_base = n * 32;
        for (i = 0; i < 32; i++) {
                irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    irq_base + i);
                if (irq == NULL)
                        continue;

                if (irq->enabled)
                        ret |= 1u << i;
                vgic_v3_release_irq(irq);
        }

        return (ret);
}

static void
write_enabler(struct hypctx *hypctx, int n, bool set, uint64_t val)
{
        struct vgic_v3_irq *irq;
        uint32_t irq_base;
        int i;

        irq_base = n * 32;
        for (i = 0; i < 32; i++) {
                /* We only change interrupts when the appropriate bit is set */
                if ((val & (1u << i)) == 0)
                        continue;

                /* Find the interrupt this bit represents */
                irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    irq_base + i);
                if (irq == NULL)
                        continue;

                irq->enabled = set;
                vgic_v3_release_irq(irq);
        }
}

static uint64_t
read_pendr(struct hypctx *hypctx, int n)
{
        struct vgic_v3_irq *irq;
        uint64_t ret;
        uint32_t irq_base;
        int i;

        ret = 0;
        irq_base = n * 32;
        for (i = 0; i < 32; i++) {
                irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    irq_base + i);
                if (irq == NULL)
                        continue;

                if (vgic_v3_irq_pending(irq))
                        ret |= 1u << i;
                vgic_v3_release_irq(irq);
        }

        return (ret);
}

static uint64_t
write_pendr(struct hypctx *hypctx, int n, bool set, uint64_t val)
{
        struct vgic_v3_cpu *vgic_cpu;
        struct vgic_v3_irq *irq;
        struct hyp *hyp;
        struct hypctx *target_hypctx;
        uint64_t ret;
        uint32_t irq_base;
        int target_vcpu, i;
        bool notify;

        hyp = hypctx->hyp;
        ret = 0;
        irq_base = n * 32;
        for (i = 0; i < 32; i++) {
                /* We only change interrupts when the appropriate bit is set */
                if ((val & (1u << i)) == 0)
                        continue;

                irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    irq_base + i);
                if (irq == NULL)
                        continue;

                notify = false;
                target_vcpu = irq->target_vcpu;
                if (target_vcpu < 0)
                        goto next_irq;
                target_hypctx = hyp->ctx[target_vcpu];
                if (target_hypctx == NULL)
                        goto next_irq;
                vgic_cpu = target_hypctx->vgic_cpu;

                if (!set) {
                        /* pending -> not pending */
                        irq->pending = false;
                } else {
                        irq->pending = true;
                        mtx_lock_spin(&vgic_cpu->lr_mtx);
                        notify = vgic_v3_queue_irq(hyp, vgic_cpu, target_vcpu,
                            irq);
                        mtx_unlock_spin(&vgic_cpu->lr_mtx);
                }
next_irq:
                vgic_v3_release_irq(irq);

                if (notify)
                        vcpu_notify_event(vm_vcpu(hyp->vm, target_vcpu));
        }

        return (ret);
}

static uint64_t
read_activer(struct hypctx *hypctx, int n)
{
        struct vgic_v3_irq *irq;
        uint64_t ret;
        uint32_t irq_base;
        int i;

        ret = 0;
        irq_base = n * 32;
        for (i = 0; i < 32; i++) {
                irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    irq_base + i);
                if (irq == NULL)
                        continue;

                if (irq->active)
                        ret |= 1u << i;
                vgic_v3_release_irq(irq);
        }

        return (ret);
}

static void
write_activer(struct hypctx *hypctx, u_int n, bool set, uint64_t val)
{
        struct vgic_v3_cpu *vgic_cpu;
        struct vgic_v3_irq *irq;
        struct hyp *hyp;
        struct hypctx *target_hypctx;
        uint32_t irq_base;
        int target_vcpu, i;
        bool notify;

        hyp = hypctx->hyp;
        irq_base = n * 32;
        for (i = 0; i < 32; i++) {
                /* We only change interrupts when the appropriate bit is set */
                if ((val & (1u << i)) == 0)
                        continue;

                irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    irq_base + i);
                if (irq == NULL)
                        continue;

                notify = false;
                target_vcpu = irq->target_vcpu;
                if (target_vcpu < 0)
                        goto next_irq;
                target_hypctx = hyp->ctx[target_vcpu];
                if (target_hypctx == NULL)
                        goto next_irq;
                vgic_cpu = target_hypctx->vgic_cpu;

                if (!set) {
                        /* active -> not active */
                        irq->active = false;
                } else {
                        /* not active -> active */
                        irq->active = true;
                        mtx_lock_spin(&vgic_cpu->lr_mtx);
                        notify = vgic_v3_queue_irq(hyp, vgic_cpu, target_vcpu,
                            irq);
                        mtx_unlock_spin(&vgic_cpu->lr_mtx);
                }
next_irq:
                vgic_v3_release_irq(irq);

                if (notify)
                        vcpu_notify_event(vm_vcpu(hyp->vm, target_vcpu));
        }
}

static uint64_t
read_priorityr(struct hypctx *hypctx, int n)
{
        struct vgic_v3_irq *irq;
        uint64_t ret;
        uint32_t irq_base;
        int i;

        ret = 0;
        irq_base = n * 4;
        for (i = 0; i < 4; i++) {
                irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    irq_base + i);
                if (irq == NULL)
                        continue;

                ret |= ((uint64_t)irq->priority) << (i * 8);
                vgic_v3_release_irq(irq);
        }

        return (ret);
}

static void
write_priorityr(struct hypctx *hypctx, u_int irq_base, u_int size, uint64_t val)
{
        struct vgic_v3_irq *irq;
        int i;

        for (i = 0; i < size; i++) {
                irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    irq_base + i);
                if (irq == NULL)
                        continue;

                /* Set the priority. We support 32 priority steps (5 bits) */
                irq->priority = (val >> (i * 8)) & 0xf8;
                vgic_v3_release_irq(irq);
        }
}
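
/*
 * Worked example, illustrative and not in the original source: only the
 * top five bits of each 8-bit priority field are kept, giving 32 levels in
 * steps of 8. A guest write of 0xa7 is therefore stored as
 * (0xa7 & 0xf8) == 0xa0, and a read of the same field returns 0xa0.
 */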

static uint64_t
read_config(struct hypctx *hypctx, int n)
{
        struct vgic_v3_irq *irq;
        uint64_t ret;
        uint32_t irq_base;
        int i;

        ret = 0;
        irq_base = n * 16;
        for (i = 0; i < 16; i++) {
                irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    irq_base + i);
                if (irq == NULL)
                        continue;

                ret |= ((uint64_t)irq->config) << (i * 2);
                vgic_v3_release_irq(irq);
        }

        return (ret);
}

static void
write_config(struct hypctx *hypctx, int n, uint64_t val)
{
        struct vgic_v3_irq *irq;
        uint32_t irq_base;
        int i;

        irq_base = n * 16;
        for (i = 0; i < 16; i++) {
                /*
                 * The config can't be changed for SGIs and PPIs. SGIs have
                 * an edge-triggered behaviour, and the register is
                 * implementation defined to be read-only for PPIs.
                 */
                if (irq_base + i < VGIC_PRV_I_NUM)
                        continue;

                irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    irq_base + i);
                if (irq == NULL)
                        continue;

                /* Bit 0 is RES0 */
                irq->config = (val >> (i * 2)) & VGIC_CONFIG_MASK;
                vgic_v3_release_irq(irq);
        }
}
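
/*
 * Worked example, illustrative and not in the original source: each
 * GICD_ICFGR word packs sixteen 2-bit fields, so register n covers INTIDs
 * [n * 16, n * 16 + 15]. Writing 0x2 into field i sets VGIC_CONFIG_EDGE
 * for INTID n * 16 + i; bit 0 of each field is RES0 and is masked off
 * above.
 */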

static uint64_t
read_route(struct hypctx *hypctx, int n)
{
        struct vgic_v3_irq *irq;
        uint64_t mpidr;

        irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), n);
        if (irq == NULL)
                return (0);

        mpidr = irq->mpidr;
        vgic_v3_release_irq(irq);

        return (mpidr);
}

static void
write_route(struct hypctx *hypctx, int n, uint64_t val, u_int offset,
    u_int size)
{
        struct vgic_v3_irq *irq;

        irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), n);
        if (irq == NULL)
                return;

        irq->mpidr = gic_reg_value_64(irq->mpidr, val, offset, size) & GICD_AFF;
        irq->target_vcpu = mpidr_to_vcpu(hypctx->hyp, irq->mpidr);
        /*
         * If the interrupt is pending we can either use the old mpidr, or
         * the new mpidr. To simplify this code we use the old value so we
         * don't need to move the interrupt until the next time it is
         * moved to the pending state.
         */
        vgic_v3_release_irq(irq);
}

/*
 * Distributor register handlers.
 */
/* GICD_CTLR */
static void
dist_ctlr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
        struct hyp *hyp;
        struct vgic_v3 *vgic;

        hyp = hypctx->hyp;
        vgic = hyp->vgic;

        mtx_lock_spin(&vgic->dist_mtx);
        *rval = vgic->gicd_ctlr;
        mtx_unlock_spin(&vgic->dist_mtx);

        /* Writes are never pending */
        *rval &= ~GICD_CTLR_RWP;
}

static void
dist_ctlr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
        struct vgic_v3 *vgic;

        MPASS(offset == 0);
        MPASS(size == 4);
        vgic = hypctx->hyp->vgic;

        /*
         * GICv2 backwards compatibility is not implemented so
         * ARE_NS is RAO/WI. This means EnableGrp1 is RES0.
         *
         * EnableGrp1A is supported, and RWP is read-only.
         *
         * All other bits are RES0 from non-secure mode as we
         * implement as if we are in a system with two security
         * states.
         */
        wval &= GICD_CTLR_G1A;
        wval |= GICD_CTLR_ARE_NS;
        mtx_lock_spin(&vgic->dist_mtx);
        vgic->gicd_ctlr = wval;
        /* TODO: Wake any vcpus that have interrupts pending */
        mtx_unlock_spin(&vgic->dist_mtx);
}

/* GICD_TYPER */
static void
dist_typer_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
        uint32_t typer;

        typer = (10 - 1) << GICD_TYPER_IDBITS_SHIFT;
        typer |= GICD_TYPER_MBIS;
        /* ITLinesNumber: */
        typer |= howmany(VGIC_NIRQS + 1, 32) - 1;

        *rval = typer;
}

/* GICD_IIDR */
static void
dist_iidr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        *rval = VGIC_IIDR;
}

/* GICD_SETSPI_NSR & GICD_CLRSPI_NSR */
static void
dist_setclrspi_nsr_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
        uint32_t irqid;

        MPASS(offset == 0);
        MPASS(size == 4);
        irqid = wval & GICD_SPI_INTID_MASK;
        INJECT_IRQ(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), irqid,
            reg == GICD_SETSPI_NSR);
}

/* GICD_ISENABLER */
static void
dist_isenabler_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        int n;

        n = (reg - GICD_ISENABLER(0)) / 4;
        /* GICD_ISENABLER0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        *rval = read_enabler(hypctx, n);
}

static void
dist_isenabler_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
        int n;

        MPASS(offset == 0);
        MPASS(size == 4);
        n = (reg - GICD_ISENABLER(0)) / 4;
        /* GICD_ISENABLER0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        write_enabler(hypctx, n, true, wval);
}

/* GICD_ICENABLER */
static void
dist_icenabler_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        int n;

        n = (reg - GICD_ICENABLER(0)) / 4;
        /* GICD_ICENABLER0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        *rval = read_enabler(hypctx, n);
}

static void
dist_icenabler_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
        int n;

        MPASS(offset == 0);
        MPASS(size == 4);
        n = (reg - GICD_ICENABLER(0)) / 4;
        /* GICD_ICENABLER0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        write_enabler(hypctx, n, false, wval);
}

/* GICD_ISPENDR */
static void
dist_ispendr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        int n;

        n = (reg - GICD_ISPENDR(0)) / 4;
        /* GICD_ISPENDR0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        *rval = read_pendr(hypctx, n);
}

static void
dist_ispendr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
        int n;

        MPASS(offset == 0);
        MPASS(size == 4);
        n = (reg - GICD_ISPENDR(0)) / 4;
        /* GICD_ISPENDR0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        write_pendr(hypctx, n, true, wval);
}

/* GICD_ICPENDR */
static void
dist_icpendr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        int n;

        n = (reg - GICD_ICPENDR(0)) / 4;
        /* GICD_ICPENDR0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        *rval = read_pendr(hypctx, n);
}

static void
dist_icpendr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
        int n;

        MPASS(offset == 0);
        MPASS(size == 4);
        n = (reg - GICD_ICPENDR(0)) / 4;
        /* GICD_ICPENDR0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        write_pendr(hypctx, n, false, wval);
}

/* GICD_ISACTIVER */
/* Affinity routing is enabled so isactiver0 is RAZ/WI */
static void
dist_isactiver_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        int n;

        n = (reg - GICD_ISACTIVER(0)) / 4;
        /* GICD_ISACTIVER0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        *rval = read_activer(hypctx, n);
}

static void
dist_isactiver_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
        int n;

        MPASS(offset == 0);
        MPASS(size == 4);
        n = (reg - GICD_ISACTIVER(0)) / 4;
        /* GICD_ISACTIVER0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        write_activer(hypctx, n, true, wval);
}

/* GICD_ICACTIVER */
static void
dist_icactiver_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
        int n;

        n = (reg - GICD_ICACTIVER(0)) / 4;
        /* GICD_ICACTIVER0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        *rval = read_activer(hypctx, n);
}

static void
dist_icactiver_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
        int n;

        MPASS(offset == 0);
        MPASS(size == 4);
        n = (reg - GICD_ICACTIVER(0)) / 4;
        /* GICD_ICACTIVER0 is RAZ/WI so handled separately */
        MPASS(n > 0);
        write_activer(hypctx, n, false, wval);
}

/* GICD_IPRIORITYR */
/* Affinity routing is enabled so ipriorityr0-7 is RAZ/WI */
static void
dist_ipriorityr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
        int n;

        n = (reg - GICD_IPRIORITYR(0)) / 4;
        /* GICD_IPRIORITYR0-7 is RAZ/WI so handled separately */
        MPASS(n > 7);
        *rval = read_priorityr(hypctx, n);
}

static void
dist_ipriorityr_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
        u_int irq_base;

        irq_base = (reg - GICD_IPRIORITYR(0)) + offset;
        /* GICD_IPRIORITYR0-7 is RAZ/WI so handled separately */
        MPASS(irq_base > 31);
        write_priorityr(hypctx, irq_base, size, wval);
}

/* GICD_ICFGR */
static void
dist_icfgr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        int n;

        n = (reg - GICD_ICFGR(0)) / 4;
        /* GICD_ICFGR0-1 are RAZ/WI so handled separately */
        MPASS(n > 1);
        *rval = read_config(hypctx, n);
}

static void
dist_icfgr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
        int n;

        MPASS(offset == 0);
        MPASS(size == 4);
        n = (reg - GICD_ICFGR(0)) / 4;
        /* GICD_ICFGR0-1 are RAZ/WI so handled separately */
        MPASS(n > 1);
        write_config(hypctx, n, wval);
}

/* GICD_IROUTER */
static void
dist_irouter_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        int n;

        n = (reg - GICD_IROUTER(0)) / 8;
        /* GICD_IROUTER0-31 don't exist */
        MPASS(n > 31);
        *rval = read_route(hypctx, n);
}

static void
dist_irouter_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
        int n;

        n = (reg - GICD_IROUTER(0)) / 8;
        /* GICD_IROUTER0-31 don't exist */
        MPASS(n > 31);
        write_route(hypctx, n, wval, offset, size);
}

static bool
vgic_register_read(struct hypctx *hypctx, struct vgic_register *reg_list,
    u_int reg_list_size, u_int reg, u_int size, uint64_t *rval, void *arg)
{
        u_int i, offset;

        for (i = 0; i < reg_list_size; i++) {
                if (reg_list[i].start <= reg && reg_list[i].end >= reg + size) {
                        offset = reg & (reg_list[i].size - 1);
                        reg -= offset;
                        if ((reg_list[i].flags & size) != 0) {
                                reg_list[i].read(hypctx, reg, rval, NULL);

                                /* Move the bits into the correct place */
                                *rval >>= (offset * 8);
                                if (size < 8) {
                                        *rval &= (1ul << (size * 8)) - 1;
                                }
                        } else {
                                /*
                                 * The access is an invalid size. Section
                                 * 12.1.3 "GIC memory-mapped register access"
                                 * of the GICv3 and GICv4 spec issue H
                                 * (IHI0069) lists the options. For a read
                                 * the controller returns unknown data, in
                                 * this case it is zero.
                                 */
                                *rval = 0;
                        }
                        return (true);
                }
        }
        return (false);
}

static bool
vgic_register_write(struct hypctx *hypctx, struct vgic_register *reg_list,
    u_int reg_list_size, u_int reg, u_int size, uint64_t wval, void *arg)
{
        u_int i, offset;

        for (i = 0; i < reg_list_size; i++) {
                if (reg_list[i].start <= reg && reg_list[i].end >= reg + size) {
                        offset = reg & (reg_list[i].size - 1);
                        reg -= offset;
                        if ((reg_list[i].flags & size) != 0) {
                                reg_list[i].write(hypctx, reg, offset,
                                    size, wval, NULL);
                        } else {
                                /*
                                 * See the comment in vgic_register_read.
                                 * For writes the controller ignores the
                                 * operation.
                                 */
                        }
                        return (true);
                }
        }
        return (false);
}
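
/*
 * Illustrative dispatch example, not in the original source: a one-byte
 * guest read of GICD_IPRIORITYR at distributor offset 0x40a matches the
 * GICD_IPRIORITYR(32)..GICD_IPRIORITYR(1024) range above. The lookup
 * computes offset = 0x40a & 3 == 2, calls the handler with the aligned
 * word at 0x408, then shifts right by 16 and masks to 8 bits so only the
 * requested byte is returned. Note how the size flags (VGIC_8_BIT, etc.)
 * double as the access-size check: flags & size is non-zero exactly for
 * the sizes an entry accepts.
 */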

static int
dist_read(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t *rval,
    int size, void *arg)
{
        struct hyp *hyp;
        struct hypctx *hypctx;
        struct vgic_v3 *vgic;
        uint64_t reg;

        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        vgic = hyp->vgic;

        /* Check the register is one of ours and is the correct size */
        if (fault_ipa < vgic->dist_start || fault_ipa + size > vgic->dist_end) {
                return (EINVAL);
        }

        reg = fault_ipa - vgic->dist_start;
        /*
         * As described in vgic_register_read an access with an invalid
         * alignment is read with an unknown value
         */
        if ((reg & (size - 1)) != 0) {
                *rval = 0;
                return (0);
        }

        if (vgic_register_read(hypctx, dist_registers, nitems(dist_registers),
            reg, size, rval, NULL))
                return (0);

        /* Reserved register addresses are RES0 so we can hardwire it to 0 */
        *rval = 0;

        return (0);
}

static int
dist_write(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t wval,
    int size, void *arg)
{
        struct hyp *hyp;
        struct hypctx *hypctx;
        struct vgic_v3 *vgic;
        uint64_t reg;

        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        vgic = hyp->vgic;

        /* Check the register is one of ours and is the correct size */
        if (fault_ipa < vgic->dist_start || fault_ipa + size > vgic->dist_end) {
                return (EINVAL);
        }

        reg = fault_ipa - vgic->dist_start;
        /*
         * As described in vgic_register_read an access with an invalid
         * alignment is write ignored.
         */
        if ((reg & (size - 1)) != 0)
                return (0);

        if (vgic_register_write(hypctx, dist_registers, nitems(dist_registers),
            reg, size, wval, NULL))
                return (0);

        /* Reserved register addresses are RES0 so we can ignore the write */
        return (0);
}

/*
 * Redistributor register handlers.
 *
 * RD_base:
 */
/* GICR_CTLR */
static void
redist_ctlr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        /* LPIs not supported */
        *rval = 0;
}

/* GICR_IIDR */
static void
redist_iidr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        *rval = VGIC_IIDR;
}

/* GICR_TYPER */
static void
redist_typer_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        uint64_t aff, gicr_typer, vmpidr_el2;
        bool last_vcpu;

        last_vcpu = false;
        if (vcpu_vcpuid(hypctx->vcpu) == (vgic_max_cpu_count(hypctx->hyp) - 1))
                last_vcpu = true;

        vmpidr_el2 = hypctx->vmpidr_el2;
        MPASS(vmpidr_el2 != 0);
        /*
         * Get affinity for the current CPU. The guest CPU affinity is taken
         * from VMPIDR_EL2. The Redistributor corresponding to this CPU is
         * the Redistributor with the same affinity from GICR_TYPER.
         */
        aff = (CPU_AFF3(vmpidr_el2) << 24) | (CPU_AFF2(vmpidr_el2) << 16) |
            (CPU_AFF1(vmpidr_el2) << 8) | CPU_AFF0(vmpidr_el2);

        /* Set up GICR_TYPER. */
        gicr_typer = aff << GICR_TYPER_AFF_SHIFT;
        /* Set the vcpu as the processor ID */
        gicr_typer |=
            (uint64_t)vcpu_vcpuid(hypctx->vcpu) << GICR_TYPER_CPUNUM_SHIFT;

        if (last_vcpu)
                /* Mark the last Redistributor */
                gicr_typer |= GICR_TYPER_LAST;

        *rval = gicr_typer;
}

/*
 * SGI_base:
 */
/* GICR_ISENABLER0 */
static void
redist_ienabler0_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
        *rval = read_enabler(hypctx, 0);
}

static void
redist_isenabler0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
        MPASS(offset == 0);
        MPASS(size == 4);
        write_enabler(hypctx, 0, true, wval);
}

/* GICR_ICENABLER0 */
static void
redist_icenabler0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
        MPASS(offset == 0);
        MPASS(size == 4);
        write_enabler(hypctx, 0, false, wval);
}

/* GICR_ISPENDR0 */
static void
redist_ipendr0_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
        *rval = read_pendr(hypctx, 0);
}

static void
redist_ispendr0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
        MPASS(offset == 0);
        MPASS(size == 4);
        write_pendr(hypctx, 0, true, wval);
}

/* GICR_ICPENDR0 */
static void
redist_icpendr0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
        MPASS(offset == 0);
        MPASS(size == 4);
        write_pendr(hypctx, 0, false, wval);
}

/* GICR_ISACTIVER0 */
static void
redist_iactiver0_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
        *rval = read_activer(hypctx, 0);
}

static void
redist_isactiver0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
        write_activer(hypctx, 0, true, wval);
}

/* GICR_ICACTIVER0 */
static void
redist_icactiver0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
        write_activer(hypctx, 0, false, wval);
}

/* GICR_IPRIORITYR */
static void
redist_ipriorityr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
        int n;

        n = (reg - GICR_IPRIORITYR(0)) / 4;
        *rval = read_priorityr(hypctx, n);
}

static void
redist_ipriorityr_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
        u_int irq_base;

        irq_base = (reg - GICR_IPRIORITYR(0)) + offset;
        write_priorityr(hypctx, irq_base, size, wval);
}

/* GICR_ICFGR1 */
static void
redist_icfgr1_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
        *rval = read_config(hypctx, 1);
}

static void
redist_icfgr1_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
        MPASS(offset == 0);
        MPASS(size == 4);
        write_config(hypctx, 1, wval);
}

static int
redist_read(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t *rval,
    int size, void *arg)
{
        struct hyp *hyp;
        struct hypctx *hypctx, *target_hypctx;
        struct vgic_v3 *vgic;
        uint64_t reg;
        int vcpuid;

        /* Find the current vcpu ctx to get the vgic struct */
        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        vgic = hyp->vgic;

        /* Check the register is one of ours and is the correct size */
        if (fault_ipa < vgic->redist_start ||
            fault_ipa + size > vgic->redist_end) {
                return (EINVAL);
        }

        vcpuid = (fault_ipa - vgic->redist_start) /
            (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
        if (vcpuid >= vm_get_maxcpus(hyp->vm)) {
                /*
                 * This should never happen, but let's be defensive so if it
                 * does we don't panic a non-INVARIANTS kernel.
                 */
#ifdef INVARIANTS
                panic("%s: Invalid vcpuid %d", __func__, vcpuid);
#else
                *rval = 0;
                return (0);
#endif
        }

        /* Find the target vcpu ctx for the access */
        target_hypctx = hyp->ctx[vcpuid];
        if (target_hypctx == NULL) {
                /*
                 * The CPU has not yet started. The redistributor and CPU are
                 * in the same power domain. As such the redistributor will
                 * also be powered down so any access will raise an external
                 * abort.
                 */
                raise_data_insn_abort(hypctx, fault_ipa, true,
                    ISS_DATA_DFSC_EXT);
                return (0);
        }

        reg = (fault_ipa - vgic->redist_start) %
            (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);

        /*
         * As described in vgic_register_read an access with an invalid
         * alignment is read with an unknown value
         */
        if ((reg & (size - 1)) != 0) {
                *rval = 0;
                return (0);
        }

        if (reg < GICR_RD_BASE_SIZE) {
                if (vgic_register_read(target_hypctx, redist_rd_registers,
                    nitems(redist_rd_registers), reg, size, rval, NULL))
                        return (0);
        } else if (reg < (GICR_SGI_BASE + GICR_SGI_BASE_SIZE)) {
                if (vgic_register_read(target_hypctx, redist_sgi_registers,
                    nitems(redist_sgi_registers), reg - GICR_SGI_BASE, size,
                    rval, NULL))
                        return (0);
        }

        /* Reserved register addresses are RES0 so we can hardwire it to 0 */
        *rval = 0;
        return (0);
}

static int
redist_write(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t wval,
    int size, void *arg)
{
        struct hyp *hyp;
        struct hypctx *hypctx, *target_hypctx;
        struct vgic_v3 *vgic;
        uint64_t reg;
        int vcpuid;

        /* Find the current vcpu ctx to get the vgic struct */
        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        vgic = hyp->vgic;

        /* Check the register is one of ours and is the correct size */
        if (fault_ipa < vgic->redist_start ||
            fault_ipa + size > vgic->redist_end) {
                return (EINVAL);
        }

        vcpuid = (fault_ipa - vgic->redist_start) /
            (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
        if (vcpuid >= vm_get_maxcpus(hyp->vm)) {
                /*
                 * This should never happen, but let's be defensive so if it
                 * does we don't panic a non-INVARIANTS kernel.
                 */
#ifdef INVARIANTS
                panic("%s: Invalid vcpuid %d", __func__, vcpuid);
#else
                return (0);
#endif
        }

        /* Find the target vcpu ctx for the access */
        target_hypctx = hyp->ctx[vcpuid];
        if (target_hypctx == NULL) {
                /*
                 * The CPU has not yet started. The redistributor and CPU are
                 * in the same power domain. As such the redistributor will
                 * also be powered down so any access will raise an external
                 * abort.
                 */
                raise_data_insn_abort(hypctx, fault_ipa, true,
                    ISS_DATA_DFSC_EXT);
                return (0);
        }

        reg = (fault_ipa - vgic->redist_start) %
            (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);

        /*
         * As described in vgic_register_read an access with an invalid
         * alignment is write ignored.
         */
        if ((reg & (size - 1)) != 0)
                return (0);

        if (reg < GICR_RD_BASE_SIZE) {
                if (vgic_register_write(target_hypctx, redist_rd_registers,
                    nitems(redist_rd_registers), reg, size, wval, NULL))
                        return (0);
        } else if (reg < (GICR_SGI_BASE + GICR_SGI_BASE_SIZE)) {
                if (vgic_register_write(target_hypctx, redist_sgi_registers,
                    nitems(redist_sgi_registers), reg - GICR_SGI_BASE, size,
                    wval, NULL))
                        return (0);
        }

        /* Reserved register addresses are RES0 so we can ignore the write */
        return (0);
}

static int
vgic_v3_icc_sgi1r_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
        /*
         * TODO: Inject an unknown exception.
         */
        *rval = 0;
        return (0);
}

static int
vgic_v3_icc_sgi1r_write(struct vcpu *vcpu, uint64_t rval, void *arg)
{
        struct vm *vm;
        struct hyp *hyp;
        cpuset_t active_cpus;
        uint64_t mpidr, aff1, aff2, aff3;
        uint32_t irqid;
        int cpus, cpu_off, target_vcpuid, vcpuid;

        vm = vcpu_vm(vcpu);
        hyp = vm_get_cookie(vm);
        active_cpus = vm_active_cpus(vm);
        vcpuid = vcpu_vcpuid(vcpu);

        irqid = ICC_SGI1R_EL1_SGIID_VAL(rval) >> ICC_SGI1R_EL1_SGIID_SHIFT;
        if ((rval & ICC_SGI1R_EL1_IRM) == 0) {
                /* A non-zero RS field points at no vcpus */
                if (ICC_SGI1R_EL1_RS_VAL(rval) != 0)
                        return (0);

                aff1 = ICC_SGI1R_EL1_AFF1_VAL(rval) >> ICC_SGI1R_EL1_AFF1_SHIFT;
                aff2 = ICC_SGI1R_EL1_AFF2_VAL(rval) >> ICC_SGI1R_EL1_AFF2_SHIFT;
                aff3 = ICC_SGI1R_EL1_AFF3_VAL(rval) >> ICC_SGI1R_EL1_AFF3_SHIFT;
                mpidr = aff3 << MPIDR_AFF3_SHIFT |
                    aff2 << MPIDR_AFF2_SHIFT | aff1 << MPIDR_AFF1_SHIFT;

                cpus = ICC_SGI1R_EL1_TL_VAL(rval) >> ICC_SGI1R_EL1_TL_SHIFT;
                cpu_off = 0;
                while (cpus > 0) {
                        if (cpus & 1) {
                                target_vcpuid = mpidr_to_vcpu(hyp,
                                    mpidr | (cpu_off << MPIDR_AFF0_SHIFT));
                                if (target_vcpuid >= 0 &&
                                    CPU_ISSET(target_vcpuid, &active_cpus)) {
                                        INJECT_IRQ(hyp, target_vcpuid, irqid,
                                            true);
                                }
                        }
                        cpu_off++;
                        cpus >>= 1;
                }
        } else {
                /* Send an IPI to all CPUs other than the current CPU */
                for (target_vcpuid = 0; target_vcpuid < vm_get_maxcpus(vm);
                    target_vcpuid++) {
                        if (CPU_ISSET(target_vcpuid, &active_cpus) &&
                            target_vcpuid != vcpuid) {
                                INJECT_IRQ(hyp, target_vcpuid, irqid, true);
                        }
                }
        }

        return (0);
}
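
/*
 * Worked example, illustrative and not in the original source: a guest
 * write with IRM clear, SGIID = 5, Aff3.Aff2.Aff1 = 0.0.0 and
 * TargetList = 0b0101 requests SGI 5 for the vCPUs whose MPIDRs have
 * Aff0 == 0 and Aff0 == 2. The loop above walks the TargetList bit by bit,
 * resolves each candidate MPIDR with mpidr_to_vcpu(), and injects the SGI
 * only into vCPUs that are active.
 */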

static void
vgic_v3_mmio_init(struct hyp *hyp)
{
        struct vgic_v3 *vgic;
        struct vgic_v3_irq *irq;
        int i;

        /* Allocate memory for the SPIs */
        vgic = hyp->vgic;
        vgic->irqs = malloc((VGIC_NIRQS - VGIC_PRV_I_NUM) *
            sizeof(*vgic->irqs), M_VGIC_V3, M_WAITOK | M_ZERO);

        for (i = 0; i < VGIC_NIRQS - VGIC_PRV_I_NUM; i++) {
                irq = &vgic->irqs[i];

                mtx_init(&irq->irq_spinmtx, "VGIC IRQ spinlock", NULL,
                    MTX_SPIN);

                irq->irq = i + VGIC_PRV_I_NUM;
        }
}

static void
vgic_v3_mmio_destroy(struct hyp *hyp)
{
        struct vgic_v3 *vgic;
        struct vgic_v3_irq *irq;
        int i;

        vgic = hyp->vgic;
        for (i = 0; i < VGIC_NIRQS - VGIC_PRV_I_NUM; i++) {
                irq = &vgic->irqs[i];

                mtx_destroy(&irq->irq_spinmtx);
        }

        free(vgic->irqs, M_VGIC_V3);
}

static int
vgic_v3_attach_to_vm(device_t dev, struct hyp *hyp, struct vm_vgic_descr *descr)
{
        struct vm *vm;
        struct vgic_v3 *vgic;
        size_t cpu_count;

        if (descr->ver.version != 3)
                return (EINVAL);

        /*
         * The register bases need to be 64k aligned.
         * The redist register space is the RD + SGI size.
         */
        if (!__is_aligned(descr->v3_regs.dist_start, PAGE_SIZE_64K) ||
            !__is_aligned(descr->v3_regs.redist_start, PAGE_SIZE_64K) ||
            !__is_aligned(descr->v3_regs.redist_size,
            GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE))
                return (EINVAL);

        /* The dist register space is 1 64k block */
        if (descr->v3_regs.dist_size != PAGE_SIZE_64K)
                return (EINVAL);

        vm = hyp->vm;

        /*
         * Return an error if the redist space is too large for the maximum
         * number of CPUs we support.
         */
        cpu_count = descr->v3_regs.redist_size /
            (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
        if (cpu_count > vm_get_maxcpus(vm))
                return (EINVAL);

        vgic = hyp->vgic;

        /* Set the distributor address and size for trapping guest access. */
        vgic->dist_start = descr->v3_regs.dist_start;
        vgic->dist_end = descr->v3_regs.dist_start + descr->v3_regs.dist_size;

        vgic->redist_start = descr->v3_regs.redist_start;
        vgic->redist_end = descr->v3_regs.redist_start +
            descr->v3_regs.redist_size;

        vm_register_inst_handler(vm, descr->v3_regs.dist_start,
            descr->v3_regs.dist_size, dist_read, dist_write);
        vm_register_inst_handler(vm, descr->v3_regs.redist_start,
            descr->v3_regs.redist_size, redist_read, redist_write);

        vm_register_reg_handler(vm, ISS_MSR_REG(ICC_SGI1R_EL1),
            ISS_MSR_REG_MASK, vgic_v3_icc_sgi1r_read, vgic_v3_icc_sgi1r_write,
            NULL);

        vgic_v3_mmio_init(hyp);

        hyp->vgic_attached = true;

        return (0);
}

static void
vgic_v3_detach_from_vm(device_t dev, struct hyp *hyp)
{
        if (hyp->vgic_attached) {
                hyp->vgic_attached = false;
                vgic_v3_mmio_destroy(hyp);
        }
}

static struct vgic_v3_irq *
vgic_v3_get_irq(struct hyp *hyp, int vcpuid, uint32_t irqid)
{
	struct vgic_v3_cpu *vgic_cpu;
	struct vgic_v3_irq *irq;
	struct hypctx *hypctx;

	if (irqid < VGIC_PRV_I_NUM) {
		if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(hyp->vm))
			return (NULL);
		hypctx = hyp->ctx[vcpuid];
		if (hypctx == NULL)
			return (NULL);
		vgic_cpu = hypctx->vgic_cpu;
		irq = &vgic_cpu->private_irqs[irqid];
	} else if (irqid <= GIC_LAST_SPI) {
		irqid -= VGIC_PRV_I_NUM;
		if (irqid >= VGIC_NIRQS)
			return (NULL);
		irq = &hyp->vgic->irqs[irqid];
	} else if (irqid < GIC_FIRST_LPI) {
		return (NULL);
	} else {
		/* No support for LPIs */
		return (NULL);
	}

	mtx_lock_spin(&irq->irq_spinmtx);
	return (irq);
}
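
/*
 * To summarise the ranges handled above: SGIs (INTIDs 0-15) and PPIs
 * (16-31) are per-vCPU private interrupts, SPIs (32 to GIC_LAST_SPI) are
 * shared and bounded by VGIC_NIRQS, INTIDs between the SPI range and
 * GIC_FIRST_LPI are reserved, and LPIs (GIC_FIRST_LPI and above) are
 * unsupported here.
 */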

static void
vgic_v3_release_irq(struct vgic_v3_irq *irq)
{

	mtx_unlock_spin(&irq->irq_spinmtx);
}

static bool
vgic_v3_has_pending_irq(device_t dev, struct hypctx *hypctx)
{
	struct vgic_v3_cpu *vgic_cpu;
	bool empty;

	vgic_cpu = hypctx->vgic_cpu;
	mtx_lock_spin(&vgic_cpu->lr_mtx);
	empty = TAILQ_EMPTY(&vgic_cpu->irq_act_pend);
	mtx_unlock_spin(&vgic_cpu->lr_mtx);

	return (!empty);
}

static bool
vgic_v3_check_irq(struct vgic_v3_irq *irq, bool level)
{
	/*
	 * Only inject if:
	 * - Level-triggered IRQ: the level has changed
	 * - Edge-triggered IRQ: the level is high
	 */
	switch (irq->config & VGIC_CONFIG_MASK) {
	case VGIC_CONFIG_LEVEL:
		return (level != irq->level);
	case VGIC_CONFIG_EDGE:
		return (level);
	default:
		break;
	}

	return (false);
}
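
/*
 * For example: with a level-triggered IRQ whose recorded irq->level is
 * low, injecting level == true returns true (a low -> high transition),
 * but injecting it again once irq->level has been latched high returns
 * false. An edge-triggered IRQ returns true on every level == true
 * injection and false otherwise.
 */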

static int
vgic_v3_inject_irq(device_t dev, struct hyp *hyp, int vcpuid, uint32_t irqid,
    bool level)
{
	struct vgic_v3_cpu *vgic_cpu;
	struct vgic_v3_irq *irq;
	struct hypctx *hypctx;
	int target_vcpu;
	bool notify;

	if (!hyp->vgic_attached)
		return (ENODEV);

	KASSERT(vcpuid == -1 || irqid < VGIC_PRV_I_NUM,
	    ("%s: SPI/LPI with vcpuid set: irq %u vcpuid %d", __func__, irqid,
	    vcpuid));

	irq = vgic_v3_get_irq(hyp, vcpuid, irqid);
	if (irq == NULL) {
		eprintf("Malformed IRQ %u.\n", irqid);
		return (EINVAL);
	}

	target_vcpu = irq->target_vcpu;
	KASSERT(vcpuid == -1 || vcpuid == target_vcpu,
	    ("%s: Interrupt %u has bad cpu affinity: vcpu %d target vcpu %d",
	    __func__, irqid, vcpuid, target_vcpu));
	KASSERT(target_vcpu >= 0 && target_vcpu < vm_get_maxcpus(hyp->vm),
	    ("%s: Interrupt %u sent to invalid vcpu %d", __func__, irqid,
	    target_vcpu));

	if (vcpuid == -1)
		vcpuid = target_vcpu;
	/* TODO: Check from 0 to vm->maxcpus */
	if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(hyp->vm)) {
		vgic_v3_release_irq(irq);
		return (EINVAL);
	}

	hypctx = hyp->ctx[vcpuid];
	if (hypctx == NULL) {
		vgic_v3_release_irq(irq);
		return (EINVAL);
	}

	notify = false;
	vgic_cpu = hypctx->vgic_cpu;

	mtx_lock_spin(&vgic_cpu->lr_mtx);

	if (!vgic_v3_check_irq(irq, level))
		goto out;

	if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_LEVEL)
		irq->level = level;
	else /* VGIC_CONFIG_EDGE */
		irq->pending = true;

	notify = vgic_v3_queue_irq(hyp, vgic_cpu, vcpuid, irq);

out:
	mtx_unlock_spin(&vgic_cpu->lr_mtx);
	vgic_v3_release_irq(irq);

	if (notify)
		vcpu_notify_event(vm_vcpu(hyp->vm, vcpuid));

	return (0);
}

static int
vgic_v3_inject_msi(device_t dev, struct hyp *hyp, uint64_t msg, uint64_t addr)
{
	struct vgic_v3 *vgic;
	uint64_t reg;

	vgic = hyp->vgic;

	/* This is a 4 byte register */
	if (addr < vgic->dist_start || addr + 4 > vgic->dist_end)
		return (EINVAL);

	reg = addr - vgic->dist_start;
	if (reg != GICD_SETSPI_NSR)
		return (EINVAL);

	return (INJECT_IRQ(hyp, -1, msg, true));
}
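
/*
 * Usage sketch: a device model delivers an MSI by writing the SPI number
 * to the distributor's GICD_SETSPI_NSR register. For example, msg == 34
 * with addr == vgic->dist_start + GICD_SETSPI_NSR injects INTID 34 with
 * the level asserted on its target vCPU.
 */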

static void
vgic_v3_flush_hwstate(device_t dev, struct hypctx *hypctx)
{
	struct vgic_v3_cpu *vgic_cpu;
	struct vgic_v3_irq *irq;
	int i;

	vgic_cpu = hypctx->vgic_cpu;

	/*
	 * All Distributor writes have been executed at this point, so there
	 * is no need to protect Distributor reads with a mutex.
	 *
	 * This is called with all interrupts disabled, so there is no need
	 * for a List Register spinlock either.
	 */
	mtx_lock_spin(&vgic_cpu->lr_mtx);

	hypctx->vgic_v3_regs.ich_hcr_el2 &= ~ICH_HCR_EL2_UIE;

	/* Exit early if there are no buffered interrupts */
	if (TAILQ_EMPTY(&vgic_cpu->irq_act_pend))
		goto out;

	KASSERT(vgic_cpu->ich_lr_used == 0, ("%s: Used LR count not zero %u",
	    __func__, vgic_cpu->ich_lr_used));

	i = 0;
	hypctx->vgic_v3_regs.ich_elrsr_el2 =
	    (1u << hypctx->vgic_v3_regs.ich_lr_num) - 1;
	TAILQ_FOREACH(irq, &vgic_cpu->irq_act_pend, act_pend_list) {
		/* No free list register, stop searching for IRQs */
		if (i == hypctx->vgic_v3_regs.ich_lr_num)
			break;

		if (!irq->enabled)
			continue;

		hypctx->vgic_v3_regs.ich_lr_el2[i] = ICH_LR_EL2_GROUP1 |
		    ((uint64_t)irq->priority << ICH_LR_EL2_PRIO_SHIFT) |
		    irq->irq;

		if (irq->active) {
			hypctx->vgic_v3_regs.ich_lr_el2[i] |=
			    ICH_LR_EL2_STATE_ACTIVE;
		}

#ifdef notyet
		/* TODO: Check why this is needed */
		if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_LEVEL)
			hypctx->vgic_v3_regs.ich_lr_el2[i] |= ICH_LR_EL2_EOI;
#endif

		if (!irq->active && vgic_v3_irq_pending(irq)) {
			hypctx->vgic_v3_regs.ich_lr_el2[i] |=
			    ICH_LR_EL2_STATE_PENDING;

			/*
			 * This IRQ is now pending on the guest. Allow for
			 * another edge that could cause the interrupt to
			 * be raised again.
			 */
			if ((irq->config & VGIC_CONFIG_MASK) ==
			    VGIC_CONFIG_EDGE) {
				irq->pending = false;
			}
		}

		i++;
	}
	vgic_cpu->ich_lr_used = i;

out:
	mtx_unlock_spin(&vgic_cpu->lr_mtx);
}
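
/*
 * A sketch of the list register value built above, assuming the
 * architectural ICH_LR_EL2 layout (state in bits [63:62], group in bit
 * [60], priority in bits [55:48], vINTID in bits [31:0]): an enabled,
 * pending, inactive Group 1 SPI 40 at priority 0x80 is written as
 *
 *	ICH_LR_EL2_GROUP1 | ICH_LR_EL2_STATE_PENDING |
 *	    (0x80UL << ICH_LR_EL2_PRIO_SHIFT) | 40
 */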

static void
vgic_v3_sync_hwstate(device_t dev, struct hypctx *hypctx)
{
	struct vgic_v3_cpu *vgic_cpu;
	struct vgic_v3_irq *irq;
	uint64_t lr;
	int i;

	vgic_cpu = hypctx->vgic_cpu;

	/* Exit early if there are no buffered interrupts */
	if (vgic_cpu->ich_lr_used == 0)
		return;

	/*
	 * Check on the IRQ state after running the guest. ich_lr_used and
	 * ich_lr_el2 are only ever used within this thread, so it is safe
	 * to access them unlocked.
	 */
	for (i = 0; i < vgic_cpu->ich_lr_used; i++) {
		lr = hypctx->vgic_v3_regs.ich_lr_el2[i];
		hypctx->vgic_v3_regs.ich_lr_el2[i] = 0;

		irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    ICH_LR_EL2_VINTID(lr));
		if (irq == NULL)
			continue;

		irq->active = (lr & ICH_LR_EL2_STATE_ACTIVE) != 0;

		if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_EDGE) {
			/*
			 * If we have an edge-triggered IRQ, preserve the
			 * pending bit until the IRQ has been handled.
			 */
			if ((lr & ICH_LR_EL2_STATE_PENDING) != 0)
				irq->pending = true;
		} else {
			/*
			 * If we have a level-triggered IRQ, remove the
			 * pending bit if the IRQ has been handled.
			 * The level is tracked separately, so it may still
			 * be high and trigger another IRQ.
			 */
			if ((lr & ICH_LR_EL2_STATE_PENDING) == 0)
				irq->pending = false;
		}

		/* Lock to update irq_act_pend */
		mtx_lock_spin(&vgic_cpu->lr_mtx);
		if (irq->active) {
			/* Ensure the active IRQ is at the head of the list */
			TAILQ_REMOVE(&vgic_cpu->irq_act_pend, irq,
			    act_pend_list);
			TAILQ_INSERT_HEAD(&vgic_cpu->irq_act_pend, irq,
			    act_pend_list);
		} else if (!vgic_v3_irq_pending(irq)) {
			/* Neither pending nor active: remove from the list */
			TAILQ_REMOVE(&vgic_cpu->irq_act_pend, irq,
			    act_pend_list);
			irq->on_aplist = false;
		}
		mtx_unlock_spin(&vgic_cpu->lr_mtx);
		vgic_v3_release_irq(irq);
	}

	hypctx->vgic_v3_regs.ich_hcr_el2 &= ~ICH_HCR_EL2_EOICOUNT_MASK;
	vgic_cpu->ich_lr_used = 0;
}

static void
vgic_v3_init(device_t dev)
{
	uint64_t ich_vtr_el2;
	uint32_t pribits, prebits;

	ich_vtr_el2 = vmm_read_reg(HYP_REG_ICH_VTR);

	/* TODO: These fields are common with the vgicv2 driver */
	pribits = ICH_VTR_EL2_PRIBITS(ich_vtr_el2);
	switch (pribits) {
	default:
	case 5:
		virt_features.min_prio = 0xf8;
		break;
	case 6:
		virt_features.min_prio = 0xfc;
		break;
	case 7:
		virt_features.min_prio = 0xfe;
		break;
	case 8:
		virt_features.min_prio = 0xff;
		break;
	}

	prebits = ICH_VTR_EL2_PREBITS(ich_vtr_el2);
	switch (prebits) {
	default:
	case 5:
		virt_features.ich_apr_num = 1;
		break;
	case 6:
		virt_features.ich_apr_num = 2;
		break;
	case 7:
		virt_features.ich_apr_num = 4;
		break;
	}

	virt_features.ich_lr_num = ICH_VTR_EL2_LISTREGS(ich_vtr_el2);
}
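
/*
 * The two switches above follow simple closed forms: with pribits
 * implemented priority bits, the lowest usable (numerically largest)
 * priority is min_prio = 0xff & (0xff << (8 - pribits)), e.g. 5 -> 0xf8;
 * with prebits preemption bits, the number of active-priority registers
 * is ich_apr_num = 1 << (prebits - 5), e.g. 7 -> 4.
 */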

static int
vgic_v3_probe(device_t dev)
{
	if (!gic_get_vgic(dev))
		return (EINVAL);

	/* We currently only support the GICv3 */
	if (gic_get_hw_rev(dev) < 3)
		return (EINVAL);

	device_set_desc(dev, "Virtual GIC v3");
	return (BUS_PROBE_DEFAULT);
}

static int
vgic_v3_attach(device_t dev)
{
	vgic_dev = dev;
	return (0);
}

static int
vgic_v3_detach(device_t dev)
{
	vgic_dev = NULL;
	return (0);
}

static device_method_t vgic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, vgic_v3_probe),
	DEVMETHOD(device_attach, vgic_v3_attach),
	DEVMETHOD(device_detach, vgic_v3_detach),

	/* VGIC interface */
	DEVMETHOD(vgic_init, vgic_v3_init),
	DEVMETHOD(vgic_attach_to_vm, vgic_v3_attach_to_vm),
	DEVMETHOD(vgic_detach_from_vm, vgic_v3_detach_from_vm),
	DEVMETHOD(vgic_vminit, vgic_v3_vminit),
	DEVMETHOD(vgic_cpuinit, vgic_v3_cpuinit),
	DEVMETHOD(vgic_cpucleanup, vgic_v3_cpucleanup),
	DEVMETHOD(vgic_vmcleanup, vgic_v3_vmcleanup),
	DEVMETHOD(vgic_max_cpu_count, vgic_v3_max_cpu_count),
	DEVMETHOD(vgic_has_pending_irq, vgic_v3_has_pending_irq),
	DEVMETHOD(vgic_inject_irq, vgic_v3_inject_irq),
	DEVMETHOD(vgic_inject_msi, vgic_v3_inject_msi),
	DEVMETHOD(vgic_flush_hwstate, vgic_v3_flush_hwstate),
	DEVMETHOD(vgic_sync_hwstate, vgic_v3_sync_hwstate),

	/* End */
	DEVMETHOD_END
};

/* TODO: Create a vgic base class? */
DEFINE_CLASS_0(vgic, vgic_v3_driver, vgic_v3_methods, 0);

DRIVER_MODULE(vgic_v3, gic, vgic_v3_driver, 0, 0);