GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/riscv/vmm/vmm_aplic.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 Ruslan Bukin <[email protected]>
 *
 * This software was developed by the University of Cambridge Computer
 * Laboratory (Department of Computer Science and Technology) under Innovate
 * UK project 105694, "Digital Security by Design (DSbD) Technology Platform
 * Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/smp.h>

#include <riscv/vmm/riscv.h>
#include <riscv/vmm/vmm_aplic.h>

#include <machine/vmm_instruction_emul.h>

#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_vm.h>

MALLOC_DEFINE(M_APLIC, "RISC-V VMM APLIC", "RISC-V AIA APLIC");

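/*
 * Register offsets below follow the memory-mapped layout of an APLIC domain
 * in the RISC-V Advanced Interrupt Architecture (AIA) specification:
 * domaincfg, per-source sourcecfg and target registers, the set/clear
 * pending and enable arrays, and one interrupt delivery control (IDC)
 * block per hart for direct delivery mode.
 */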
#define APLIC_DOMAINCFG 0x0000
#define DOMAINCFG_IE (1 << 8) /* Interrupt Enable. */
#define DOMAINCFG_DM (1 << 2) /* Direct Mode. */
#define DOMAINCFG_BE (1 << 0) /* Big-Endian. */
#define APLIC_SOURCECFG(x) (0x0004 + ((x) - 1) * 4)
#define SOURCECFG_D (1 << 10) /* D - Delegate. */
/* If D == 0. */
#define SOURCECFG_SM_S (0)
#define SOURCECFG_SM_M (0x7 << SOURCECFG_SM_S)
#define SOURCECFG_SM_INACTIVE (0) /* Not delegated. */
#define SOURCECFG_SM_DETACHED (1)
#define SOURCECFG_SM_RESERVED (2)
#define SOURCECFG_SM_RESERVED1 (3)
#define SOURCECFG_SM_EDGE1 (4) /* Rising edge. */
#define SOURCECFG_SM_EDGE0 (5) /* Falling edge. */
#define SOURCECFG_SM_LEVEL1 (6) /* High. */
#define SOURCECFG_SM_LEVEL0 (7) /* Low. */
/* If D == 1. */
#define SOURCECFG_CHILD_INDEX_S (0)
#define SOURCECFG_CHILD_INDEX_M (0x3ff << SOURCECFG_CHILD_INDEX_S)
#define APLIC_SETIP 0x1c00
#define APLIC_SETIPNUM 0x1cdc
#define APLIC_CLRIP 0x1d00
#define APLIC_CLRIPNUM 0x1ddc
#define APLIC_SETIE 0x1e00
#define APLIC_SETIENUM 0x1edc
#define APLIC_CLRIE 0x1f00
#define APLIC_CLRIENUM 0x1fdc
#define APLIC_GENMSI 0x3000
#define APLIC_TARGET(x) (0x3004 + ((x) - 1) * 4)
#define TARGET_HART_S 18
#define TARGET_HART_M 0x3fff
#define APLIC_IDC(x) (0x4000 + (x) * 32)
#define IDC_IDELIVERY(x) (APLIC_IDC(x) + 0x0)
#define IDC_IFORCE(x) (APLIC_IDC(x) + 0x4)
#define IDC_ITHRESHOLD(x) (APLIC_IDC(x) + 0x8)
#define IDC_TOPI(x) (APLIC_IDC(x) + 0x18)
#define IDC_CLAIMI(x) (APLIC_IDC(x) + 0x1C)
#define CLAIMI_IRQ_S (16)
#define CLAIMI_IRQ_M (0x3ff << CLAIMI_IRQ_S)
#define CLAIMI_PRIO_S (0)
#define CLAIMI_PRIO_M (0xff << CLAIMI_PRIO_S)

#define APLIC_NIRQS 63

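/*
 * Software state kept for each emulated interrupt source: the guest-visible
 * sourcecfg and target registers plus input/pending/enabled flags.
 */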
struct aplic_irq {
        uint32_t sourcecfg;
        uint32_t state;
#define APLIC_IRQ_STATE_PENDING (1 << 0)
#define APLIC_IRQ_STATE_ENABLED (1 << 1)
#define APLIC_IRQ_STATE_INPUT (1 << 2)
        uint32_t target;
        uint32_t target_hart;
};

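/* Per-VM APLIC state, protected by a spin mutex. */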
struct aplic {
        uint32_t mem_start;
        uint32_t mem_end;
        struct mtx mtx;
        struct aplic_irq *irqs;
        int nirqs;
        uint32_t domaincfg;
};

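/*
 * Read or write the sourcecfg register of interrupt source i. Source numbers
 * run from 1 to nirqs; anything else returns ENOENT.
 */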
static int
aplic_handle_sourcecfg(struct aplic *aplic, int i, bool write, uint64_t *val)
{
        struct aplic_irq *irq;

        if (i <= 0 || i > aplic->nirqs)
                return (ENOENT);

        mtx_lock_spin(&aplic->mtx);
        irq = &aplic->irqs[i];
        if (write)
                irq->sourcecfg = *val;
        else
                *val = irq->sourcecfg;
        mtx_unlock_spin(&aplic->mtx);

        return (0);
}

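/*
 * Handle a write to setienum/clrienum: *val carries a single interrupt source
 * number whose enable bit is set or cleared. Reads return zero.
 */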
static int
aplic_set_enabled(struct aplic *aplic, bool write, uint64_t *val, bool enabled)
{
        struct aplic_irq *irq;
        int i;

        if (!write) {
                *val = 0;
                return (0);
        }

        i = *val;
        if (i <= 0 || i > aplic->nirqs)
                return (-1);

        irq = &aplic->irqs[i];

        mtx_lock_spin(&aplic->mtx);
        if ((irq->sourcecfg & SOURCECFG_SM_M) != SOURCECFG_SM_INACTIVE) {
                if (enabled)
                        irq->state |= APLIC_IRQ_STATE_ENABLED;
                else
                        irq->state &= ~APLIC_IRQ_STATE_ENABLED;
        }
        mtx_unlock_spin(&aplic->mtx);

        return (0);
}

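/*
 * Handle one 32-bit word of the setie/clrie arrays: each bit set in *val
 * enables or disables source number (word * 32 + bit).
 */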
static void
aplic_set_enabled_word(struct aplic *aplic, bool write, uint32_t word,
    uint64_t *val, bool enabled)
{
        uint64_t v;
        int i;

        if (!write) {
                *val = 0;
                return;
        }

        /*
         * The write is ignored if the value written is not an active
         * interrupt source number in the domain.
         */
        for (i = 0; i < 32; i++)
                if (*val & (1u << i)) {
                        v = word * 32 + i;
                        (void)aplic_set_enabled(aplic, write, &v, enabled);
                }
}

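/*
 * Read or write the target register of interrupt source i; on a write the
 * destination hart index is extracted and cached in target_hart.
 */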
static int
aplic_handle_target(struct aplic *aplic, int i, bool write, uint64_t *val)
{
        struct aplic_irq *irq;

        mtx_lock_spin(&aplic->mtx);
        irq = &aplic->irqs[i];
        if (write) {
                irq->target = *val;
                irq->target_hart = (irq->target >> TARGET_HART_S);
        } else
                *val = irq->target;
        mtx_unlock_spin(&aplic->mtx);

        return (0);
}

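/*
 * Emulate a read of a hart's claimi register: report the first pending
 * interrupt targeting this hart and clear its pending bit, or return zero
 * if nothing is pending.
 */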
static int
aplic_handle_idc_claimi(struct hyp *hyp, struct aplic *aplic, int cpu_id,
    bool write, uint64_t *val)
{
        struct aplic_irq *irq;
        bool found;
        int i;

        /* Writes to claimi are ignored. */
        if (write)
                return (-1);

        found = false;

        mtx_lock_spin(&aplic->mtx);
        for (i = 0; i < aplic->nirqs; i++) {
                irq = &aplic->irqs[i];
                if (irq->target_hart != cpu_id)
                        continue;
                if (irq->state & APLIC_IRQ_STATE_PENDING) {
                        *val = (i << CLAIMI_IRQ_S) | (0 << CLAIMI_PRIO_S);
                        irq->state &= ~APLIC_IRQ_STATE_PENDING;
                        found = true;
                        break;
                }
        }
        mtx_unlock_spin(&aplic->mtx);

        if (found == false)
                *val = 0;

        return (0);
}

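/*
 * Dispatch an access to a hart's interrupt delivery control (IDC) block.
 * Only claimi needs real work; idelivery, iforce, ithreshold and topi are
 * accepted and ignored.
 */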
static int
aplic_handle_idc(struct hyp *hyp, struct aplic *aplic, int cpu, int reg,
    bool write, uint64_t *val)
{
        int error;

        switch (reg + APLIC_IDC(0)) {
        case IDC_IDELIVERY(0):
        case IDC_IFORCE(0):
        case IDC_ITHRESHOLD(0):
        case IDC_TOPI(0):
                error = 0;
                break;
        case IDC_CLAIMI(0):
                error = aplic_handle_idc_claimi(hyp, aplic, cpu, write, val);
                break;
        default:
                error = ENOENT;
        }

        return (error);
}

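/*
 * Central MMIO dispatcher. 'reg' is the offset from the start of the emulated
 * APLIC region; range checks route it to the sourcecfg, target, IDC or
 * enable/disable handlers, and the remaining single registers are handled
 * inline.
 */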
static int
aplic_mmio_access(struct hyp *hyp, struct aplic *aplic, uint64_t reg,
    bool write, uint64_t *val)
{
        int error;
        int cpu;
        int r;
        int i;

        dprintf("%s: reg %lx\n", __func__, reg);

        if ((reg >= APLIC_SOURCECFG(1)) &&
            (reg <= APLIC_SOURCECFG(aplic->nirqs))) {
                i = ((reg - APLIC_SOURCECFG(1)) >> 2) + 1;
                error = aplic_handle_sourcecfg(aplic, i, write, val);
                return (error);
        }

        if ((reg >= APLIC_TARGET(1)) && (reg <= APLIC_TARGET(aplic->nirqs))) {
                i = ((reg - APLIC_TARGET(1)) >> 2) + 1;
                error = aplic_handle_target(aplic, i, write, val);
                return (error);
        }

        if ((reg >= APLIC_IDC(0)) && (reg < APLIC_IDC(mp_ncpus))) {
                cpu = (reg - APLIC_IDC(0)) >> 5;
                r = (reg - APLIC_IDC(0)) % 32;
                error = aplic_handle_idc(hyp, aplic, cpu, r, write, val);
                return (error);
        }

        if ((reg >= APLIC_CLRIE) && (reg < (APLIC_CLRIE + aplic->nirqs * 4))) {
                i = (reg - APLIC_CLRIE) >> 2;
                aplic_set_enabled_word(aplic, write, i, val, false);
                return (0);
        }

        switch (reg) {
        case APLIC_DOMAINCFG:
                mtx_lock_spin(&aplic->mtx);
                if (write)
                        aplic->domaincfg = *val & DOMAINCFG_IE;
                else
                        *val = aplic->domaincfg;
                mtx_unlock_spin(&aplic->mtx);
                error = 0;
                break;
        case APLIC_SETIENUM:
                error = aplic_set_enabled(aplic, write, val, true);
                break;
        case APLIC_CLRIENUM:
                error = aplic_set_enabled(aplic, write, val, false);
                break;
        default:
                dprintf("%s: unknown reg %lx", __func__, reg);
                error = ENOENT;
                break;
        };

        return (error);
}

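/*
 * mem_read() and mem_write() are the instruction emulation callbacks invoked
 * for guest loads and stores that fault into the registered APLIC region;
 * they translate the fault address into a register offset and call
 * aplic_mmio_access().
 */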
static int
mem_read(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t *rval, int size,
    void *arg)
{
        struct hypctx *hypctx;
        struct hyp *hyp;
        struct aplic *aplic;
        uint64_t reg;
        uint64_t val;
        int error;

        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        aplic = hyp->aplic;

        dprintf("%s: fault_ipa %lx size %d\n", __func__, fault_ipa, size);

        if (fault_ipa < aplic->mem_start || fault_ipa + size > aplic->mem_end)
                return (EINVAL);

        reg = fault_ipa - aplic->mem_start;

        error = aplic_mmio_access(hyp, aplic, reg, false, &val);
        if (error == 0)
                *rval = val;

        return (error);
}

static int
mem_write(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t wval, int size,
    void *arg)
{
        struct hypctx *hypctx;
        struct hyp *hyp;
        struct aplic *aplic;
        uint64_t reg;
        uint64_t val;
        int error;

        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        aplic = hyp->aplic;

        dprintf("%s: fault_ipa %lx wval %lx size %d\n", __func__, fault_ipa,
            wval, size);

        if (fault_ipa < aplic->mem_start || fault_ipa + size > aplic->mem_end)
                return (EINVAL);

        reg = fault_ipa - aplic->mem_start;

        val = wval;

        error = aplic_mmio_access(hyp, aplic, reg, true, &val);

        return (error);
}

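/*
 * Allocate the per-VM APLIC state and its lock; released again in
 * aplic_vmcleanup().
 */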
void
aplic_vminit(struct hyp *hyp)
{
        struct aplic *aplic;

        hyp->aplic = malloc(sizeof(*hyp->aplic), M_APLIC,
            M_WAITOK | M_ZERO);
        aplic = hyp->aplic;

        mtx_init(&aplic->mtx, "APLIC lock", NULL, MTX_SPIN);
}

void
aplic_vmcleanup(struct hyp *hyp)
{
        struct aplic *aplic;

        aplic = hyp->aplic;

        mtx_destroy(&aplic->mtx);

        free(hyp->aplic, M_APLIC);
}

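/*
 * Place the emulated APLIC into the guest physical address space described
 * by the descriptor, register the MMIO handlers and allocate the per-source
 * state.
 */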
int
aplic_attach_to_vm(struct hyp *hyp, struct vm_aplic_descr *descr)
{
        struct aplic *aplic;
        struct vm *vm;

        vm = hyp->vm;

        dprintf("%s\n", __func__);

        vm_register_inst_handler(vm, descr->mem_start, descr->mem_size,
            mem_read, mem_write);

        aplic = hyp->aplic;
        aplic->nirqs = APLIC_NIRQS;
        aplic->mem_start = descr->mem_start;
        aplic->mem_end = descr->mem_start + descr->mem_size;
        aplic->irqs = malloc(sizeof(struct aplic_irq) * aplic->nirqs, M_APLIC,
            M_WAITOK | M_ZERO);

        hyp->aplic_attached = true;

        return (0);
}

void
aplic_detach_from_vm(struct hyp *hyp)
{
        struct aplic *aplic;

        aplic = hyp->aplic;

        dprintf("%s\n", __func__);

        if (hyp->aplic_attached) {
                hyp->aplic_attached = false;
                free(aplic->irqs, M_APLIC);
        }
}

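/*
 * Return 1 if the domain has interrupts enabled and at least one enabled,
 * pending interrupt targets this vCPU; 0 otherwise.
 */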
int
aplic_check_pending(struct hypctx *hypctx)
{
        struct aplic_irq *irq;
        struct aplic *aplic;
        struct hyp *hyp;
        int i;

        hyp = hypctx->hyp;
        aplic = hyp->aplic;

        mtx_lock_spin(&aplic->mtx);
        if ((aplic->domaincfg & DOMAINCFG_IE) == 0) {
                mtx_unlock_spin(&aplic->mtx);
                return (0);
        }

        for (i = 0; i < aplic->nirqs; i++) {
                irq = &aplic->irqs[i];
                if (irq->target_hart != hypctx->cpu_id)
                        continue;
                if ((irq->state & APLIC_IRQ_STATE_ENABLED) &&
                    (irq->state & APLIC_IRQ_STATE_PENDING)) {
                        mtx_unlock_spin(&aplic->mtx);
                        /* Found. */
                        return (1);
                }
        }
        mtx_unlock_spin(&aplic->mtx);

        return (0);
}

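/*
 * Inject (or update the level of) interrupt source irqid. The pending bit is
 * set according to the source mode configured in sourcecfg (level- or
 * edge-triggered, active-high or active-low), and the target vCPU is notified
 * when the source is both enabled and pending.
 */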
int
aplic_inject_irq(struct hyp *hyp, int vcpuid, uint32_t irqid, bool level)
{
        struct aplic_irq *irq;
        struct aplic *aplic;
        bool notify;
        int error;
        int mask;

        aplic = hyp->aplic;

        error = 0;

        mtx_lock_spin(&aplic->mtx);
        if ((aplic->domaincfg & DOMAINCFG_IE) == 0) {
                mtx_unlock_spin(&aplic->mtx);
                return (error);
        }

        irq = &aplic->irqs[irqid];
        if (irq->sourcecfg & SOURCECFG_D) {
                mtx_unlock_spin(&aplic->mtx);
                return (error);
        }

        notify = false;
        switch (irq->sourcecfg & SOURCECFG_SM_M) {
        case SOURCECFG_SM_LEVEL0:
                if (!level)
                        irq->state |= APLIC_IRQ_STATE_PENDING;
                break;
        case SOURCECFG_SM_LEVEL1:
                if (level)
                        irq->state |= APLIC_IRQ_STATE_PENDING;
                break;
        case SOURCECFG_SM_EDGE0:
                if (!level && (irq->state & APLIC_IRQ_STATE_INPUT))
                        irq->state |= APLIC_IRQ_STATE_PENDING;
                break;
        case SOURCECFG_SM_EDGE1:
                if (level && !(irq->state & APLIC_IRQ_STATE_INPUT))
                        irq->state |= APLIC_IRQ_STATE_PENDING;
                break;
        case SOURCECFG_SM_DETACHED:
        case SOURCECFG_SM_INACTIVE:
                break;
        default:
                error = ENXIO;
                break;
        }

        if (level)
                irq->state |= APLIC_IRQ_STATE_INPUT;
        else
                irq->state &= ~APLIC_IRQ_STATE_INPUT;

        mask = APLIC_IRQ_STATE_ENABLED | APLIC_IRQ_STATE_PENDING;
        if ((irq->state & mask) == mask)
                notify = true;

        mtx_unlock_spin(&aplic->mtx);

        if (notify)
                vcpu_notify_event(vm_vcpu(hyp->vm, irq->target_hart));

        return (error);
}

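/* MSI delivery through the APLIC is not implemented yet. */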
int
aplic_inject_msi(struct hyp *hyp, uint64_t msg, uint64_t addr)
{

        /* TODO. */

        return (ENXIO);
}

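/*
 * There is currently no per-vCPU APLIC state to construct, destroy or carry
 * across world switches, so the hooks below are empty.
 */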
void
aplic_cpuinit(struct hypctx *hypctx)
{

}

void
aplic_cpucleanup(struct hypctx *hypctx)
{

}

void
aplic_flush_hwstate(struct hypctx *hypctx)
{

}

void
aplic_sync_hwstate(struct hypctx *hypctx)
{

}