GitHub Repository: torvalds/linux
Path: blob/master/arch/riscv/kvm/aia.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/cpufeature.h>
#include <asm/kvm_nacl.h>

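/*
 * Per-hart bookkeeping of guest external interrupt (HGEI) lines:
 * free_bitmap tracks which lines are unallocated and owners[] maps
 * an allocated line back to the vCPU using it.
 */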
struct aia_hgei_control {
	raw_spinlock_t lock;
	unsigned long free_bitmap;
	struct kvm_vcpu *owners[BITS_PER_LONG];
};
static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
static int hgei_parent_irq;

unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);

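/*
 * Build an HVICTL value with IID = IRQ_S_EXT and IPRIO = 1 when an
 * external interrupt is pending; IID == 9 with IPRIO == 0 would be
 * the "no interrupt" encoding described below.
 */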
static inline unsigned long aia_hvictl_value(bool ext_irq_pending)
{
	unsigned long hvictl;

	/*
	 * HVICTL.IID == 9 and HVICTL.IPRIO == 0 represent
	 * no interrupt in HVICTL.
	 */
	hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
	hvictl |= ext_irq_pending;
	return hvictl;
}

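/*
 * On RV32 the upper 32 bits of the pending-interrupt state live in the
 * high-half CSRs: flush folds irqs_pending[1] into the soft HVIPH copy
 * and sync captures VSIEH back from hardware.
 */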
#ifdef CONFIG_32BIT
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	unsigned long mask, val;

	if (!kvm_riscv_aia_available())
		return;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask;

		csr->hviph &= ~mask;
		csr->hviph |= val;
	}
}

void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (kvm_riscv_aia_available())
		csr->vsieh = ncsr_read(CSR_VSIEH);
}
#endif

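/*
 * Check whether the vCPU has an AIA interrupt deliverable under @mask;
 * an enabled external interrupt additionally requires querying the
 * IMSIC guest file.
 */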
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long seip;

	if (!kvm_riscv_aia_available())
		return false;

#ifdef CONFIG_32BIT
	if (READ_ONCE(vcpu->arch.irqs_pending[1]) &
	    (vcpu->arch.aia_context.guest_csr.vsieh & upper_32_bits(mask)))
		return true;
#endif

	seip = vcpu->arch.guest_csr.vsie;
	seip &= (unsigned long)mask;
	seip &= BIT(IRQ_S_EXT);

	if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
		return false;

	return kvm_riscv_vcpu_aia_imsic_has_interrupt(vcpu);
}

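/*
 * Propagate the vCPU's pending state into hardware: write the soft
 * HVIPH copy on RV32 and program HVICTL according to whether a virtual
 * S-mode external interrupt is pending in HVIP.
 */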
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

#ifdef CONFIG_32BIT
	ncsr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
#endif
	ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
}

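/*
 * Restore the vCPU's AIA CSR state when it is scheduled in on @cpu,
 * using the NACL shared-memory fast-path when available, then load
 * the IMSIC state.
 */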
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	void *nsh;

	if (!kvm_riscv_aia_available())
		return;

	if (kvm_riscv_nacl_sync_csr_available()) {
		nsh = nacl_shmem();
		nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect);
		nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);
		nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
		nacl_csr_write(nsh, CSR_VSIEH, csr->vsieh);
		nacl_csr_write(nsh, CSR_HVIPH, csr->hviph);
		nacl_csr_write(nsh, CSR_HVIPRIO1H, csr->hviprio1h);
		nacl_csr_write(nsh, CSR_HVIPRIO2H, csr->hviprio2h);
#endif
	} else {
		csr_write(CSR_VSISELECT, csr->vsiselect);
		csr_write(CSR_HVIPRIO1, csr->hviprio1);
		csr_write(CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
		csr_write(CSR_VSIEH, csr->vsieh);
		csr_write(CSR_HVIPH, csr->hviph);
		csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
		csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
	}

	if (kvm_riscv_aia_initialized(vcpu->kvm))
		kvm_riscv_vcpu_aia_imsic_load(vcpu, cpu);
}

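/*
 * Counterpart of kvm_riscv_vcpu_aia_load(): put the IMSIC state first,
 * then save the AIA CSR state when the vCPU is scheduled out.
 */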
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	void *nsh;

	if (!kvm_riscv_aia_available())
		return;

	if (kvm_riscv_aia_initialized(vcpu->kvm))
		kvm_riscv_vcpu_aia_imsic_put(vcpu);

	if (kvm_riscv_nacl_available()) {
		nsh = nacl_shmem();
		csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
		csr->hviprio1 = nacl_csr_read(nsh, CSR_HVIPRIO1);
		csr->hviprio2 = nacl_csr_read(nsh, CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
		csr->vsieh = nacl_csr_read(nsh, CSR_VSIEH);
		csr->hviph = nacl_csr_read(nsh, CSR_HVIPH);
		csr->hviprio1h = nacl_csr_read(nsh, CSR_HVIPRIO1H);
		csr->hviprio2h = nacl_csr_read(nsh, CSR_HVIPRIO2H);
#endif
	} else {
		csr->vsiselect = csr_read(CSR_VSISELECT);
		csr->hviprio1 = csr_read(CSR_HVIPRIO1);
		csr->hviprio2 = csr_read(CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
		csr->vsieh = csr_read(CSR_VSIEH);
		csr->hviph = csr_read(CSR_HVIPH);
		csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
		csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
#endif
	}
}

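/*
 * ONE_REG accessors for the AIA guest CSR state; @reg_num indexes the
 * unsigned-long members of struct kvm_riscv_aia_csr.
 */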
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -ENOENT;

	*out_val = 0;
	if (kvm_riscv_aia_available())
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (kvm_riscv_aia_available()) {
		((unsigned long *)csr)[reg_num] = val;

#ifdef CONFIG_32BIT
		if (reg_num == KVM_REG_RISCV_CSR_AIA_REG(siph))
			WRITE_ONCE(vcpu->arch.irqs_pending_mask[1], 0);
#endif
	}

	return 0;
}

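/*
 * Emulate a read-modify-write of the guest's external-interrupt top
 * (topei) CSR by forwarding it to the in-kernel IMSIC.
 */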
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask)
{
	/* If AIA is not available then redirect the trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* If AIA is not initialized then forward to user space */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return KVM_INSN_EXIT_TO_USER_SPACE;

	return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, KVM_RISCV_AIA_IMSIC_TOPEI,
					    val, new_val, wr_mask);
}

/*
 * The external IRQ priority is always read-only zero. This means the
 * default priority order is always preferred for external IRQs unless
 * HVICTL.IID == 9 and HVICTL.IPRIO != 0.
 */
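/*
 * Bit position of each minor IRQ's 8-bit priority field within the
 * concatenated HVIPRIO1/HVIPRIO2 (plus HVIPRIO1H/HVIPRIO2H on RV32)
 * CSRs; -1 marks IRQs without a configurable priority.
 */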
static int aia_irq2bitpos[] = {
	0,     8,   -1,   -1,   16,   24,   -1,   -1, /* 0 - 7 */
	32,   -1,   -1,   -1,   -1,   40,   48,   56, /* 8 - 15 */
	64,   72,   80,   88,   96,  104,  112,  120, /* 16 - 23 */
	-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 24 - 31 */
	-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 32 - 39 */
	-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 40 - 47 */
	-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 48 - 55 */
	-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 56 - 63 */
};

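/*
 * Read or write the 8-bit priority of @irq by selecting the HVIPRIO
 * CSR that holds its slot; the case layout differs between RV32 and
 * RV64 because each CSR covers BITS_PER_LONG bits of the array.
 */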
static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return 0;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = ncsr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = ncsr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = ncsr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return 0;
	}

	return (hviprio >> (bitpos % BITS_PER_LONG)) & TOPI_IPRIO_MASK;
}

static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = ncsr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = ncsr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = ncsr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = ncsr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return;
	}

	hviprio &= ~(TOPI_IPRIO_MASK << (bitpos % BITS_PER_LONG));
	hviprio |= (unsigned long)prio << (bitpos % BITS_PER_LONG);

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		ncsr_write(CSR_HVIPRIO1, hviprio);
		break;
	case 1:
#ifndef CONFIG_32BIT
		ncsr_write(CSR_HVIPRIO2, hviprio);
		break;
#else
		ncsr_write(CSR_HVIPRIO1H, hviprio);
		break;
	case 2:
		ncsr_write(CSR_HVIPRIO2, hviprio);
		break;
	case 3:
		ncsr_write(CSR_HVIPRIO2H, hviprio);
		break;
#endif
	default:
		return;
	}
}

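/*
 * Emulate a read-modify-write of one iprio array register, which packs
 * 4 (RV32) or 8 (RV64) per-IRQ priority bytes per access; odd isel
 * values are illegal on RV64.
 */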
static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
			 unsigned long *val, unsigned long new_val,
			 unsigned long wr_mask)
{
	int i, first_irq, nirqs;
	unsigned long old_val;
	u8 prio;

#ifndef CONFIG_32BIT
	if (isel & 0x1)
		return KVM_INSN_ILLEGAL_TRAP;
#endif

	nirqs = 4 * (BITS_PER_LONG / 32);
	first_irq = (isel - ISELECT_IPRIO0) * 4;

	old_val = 0;
	for (i = 0; i < nirqs; i++) {
		prio = aia_get_iprio8(vcpu, first_irq + i);
		old_val |= (unsigned long)prio << (TOPI_IPRIO_BITS * i);
	}

	if (val)
		*val = old_val;

	if (wr_mask) {
		new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
		for (i = 0; i < nirqs; i++) {
			prio = (new_val >> (TOPI_IPRIO_BITS * i)) &
				TOPI_IPRIO_MASK;
			aia_set_iprio8(vcpu, first_irq + i, prio);
		}
	}

	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

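/*
 * Emulate an indirect CSR access (siselect/sireg): iprio array
 * accesses are handled in-kernel, IMSIC register accesses go to the
 * in-kernel IMSIC, and everything else exits to user space.
 */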
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask)
{
	unsigned int isel;

	/* If AIA is not available then redirect the trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* First try to emulate in kernel space */
	isel = ncsr_read(CSR_VSISELECT) & ISELECT_MASK;
	if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
		return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
	else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
		 kvm_riscv_aia_initialized(vcpu->kvm))
		return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, isel, val, new_val,
						    wr_mask);

	/* We can't handle it here so redirect to user space */
	return KVM_INSN_EXIT_TO_USER_SPACE;
}

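/*
 * Allocate a guest external interrupt line on @cpu and, on success,
 * return its number along with the virtual and physical address of
 * the matching IMSIC guest-file MMIO page.
 */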
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
			     void __iomem **hgei_va, phys_addr_t *hgei_pa)
{
	int ret = -ENOENT;
	unsigned long flags;
	const struct imsic_global_config *gc;
	const struct imsic_local_config *lc;
	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

	if (!kvm_riscv_aia_available() || !hgctrl)
		return -ENODEV;

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	if (hgctrl->free_bitmap) {
		ret = __ffs(hgctrl->free_bitmap);
		hgctrl->free_bitmap &= ~BIT(ret);
		hgctrl->owners[ret] = owner;
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	gc = imsic_get_global_config();
	lc = (gc) ? per_cpu_ptr(gc->local, cpu) : NULL;
	if (lc && ret > 0) {
		if (hgei_va)
			*hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ);
		if (hgei_pa)
			*hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ);
	}

	return ret;
}

void kvm_riscv_aia_free_hgei(int cpu, int hgei)
{
	unsigned long flags;
	struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

	if (!kvm_riscv_aia_available() || !hgctrl)
		return;

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei) {
		if (!(hgctrl->free_bitmap & BIT(hgei))) {
			hgctrl->free_bitmap |= BIT(hgei);
			hgctrl->owners[hgei] = NULL;
		}
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}

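/*
 * SGEI handler: mask the pending lines in HGEIE and kick each owning
 * vCPU so it notices the new guest external interrupt.
 */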
static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
	int i;
	unsigned long hgei_mask, flags;
	struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);

	hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
	csr_clear(CSR_HGEIE, hgei_mask);

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
		if (hgctrl->owners[i])
			kvm_vcpu_kick(hgctrl->owners[i]);
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
	return IRQ_HANDLED;
}

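/*
 * Set up HGEI line management: line 0 is kept reserved (bit 0 is
 * cleared from free_bitmap), and the per-CPU SGEI interrupt is only
 * requested when at least one line is available.
 */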
static int aia_hgei_init(void)
{
	int cpu, rc;
	struct irq_domain *domain;
	struct aia_hgei_control *hgctrl;

	/* Initialize per-CPU guest external interrupt line management */
	for_each_possible_cpu(cpu) {
		hgctrl = per_cpu_ptr(&aia_hgei, cpu);
		raw_spin_lock_init(&hgctrl->lock);
		if (kvm_riscv_aia_nr_hgei) {
			hgctrl->free_bitmap =
				BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
			hgctrl->free_bitmap &= ~BIT(0);
		} else {
			hgctrl->free_bitmap = 0;
		}
	}

	/* Skip SGEI interrupt setup for zero guest external interrupts */
	if (!kvm_riscv_aia_nr_hgei)
		goto skip_sgei_interrupt;

	/* Find the INTC irq domain */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!domain) {
		kvm_err("unable to find INTC domain\n");
		return -ENOENT;
	}

	/* Map the per-CPU SGEI interrupt from the INTC domain */
	hgei_parent_irq = irq_create_mapping(domain, IRQ_S_GEXT);
	if (!hgei_parent_irq) {
		kvm_err("unable to map SGEI IRQ\n");
		return -ENOMEM;
	}

	/* Request the per-CPU SGEI interrupt */
	rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
				"riscv-kvm", &aia_hgei);
	if (rc) {
		kvm_err("failed to request SGEI IRQ\n");
		return rc;
	}

skip_sgei_interrupt:
	return 0;
}

static void aia_hgei_exit(void)
{
	/* Do nothing for zero guest external interrupts */
	if (!kvm_riscv_aia_nr_hgei)
		return;

	/* Free the per-CPU SGEI interrupt */
	free_percpu_irq(hgei_parent_irq, &aia_hgei);
}

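/*
 * Enable AIA on the current host CPU: reset the HVICTL/HVIPRIO state
 * and enable SGEI delivery via HIE and the per-CPU interrupt.
 */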
void kvm_riscv_aia_enable(void)
{
	if (!kvm_riscv_aia_available())
		return;

	csr_write(CSR_HVICTL, aia_hvictl_value(false));
	csr_write(CSR_HVIPRIO1, 0x0);
	csr_write(CSR_HVIPRIO2, 0x0);
#ifdef CONFIG_32BIT
	csr_write(CSR_HVIPH, 0x0);
	csr_write(CSR_HIDELEGH, 0x0);
	csr_write(CSR_HVIPRIO1H, 0x0);
	csr_write(CSR_HVIPRIO2H, 0x0);
#endif

	/* Enable the per-CPU SGEI interrupt */
	enable_percpu_irq(hgei_parent_irq,
			  irq_get_trigger_type(hgei_parent_irq));
	csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
	/* Enable IRQ filtering for the overflow interrupt only if Sscofpmf is present */
	if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
		csr_set(CSR_HVIEN, BIT(IRQ_PMU_OVF));
}

559
void kvm_riscv_aia_disable(void)
560
{
561
int i;
562
unsigned long flags;
563
struct kvm_vcpu *vcpu;
564
struct aia_hgei_control *hgctrl;
565
566
if (!kvm_riscv_aia_available())
567
return;
568
hgctrl = get_cpu_ptr(&aia_hgei);
569
570
if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
571
csr_clear(CSR_HVIEN, BIT(IRQ_PMU_OVF));
572
/* Disable per-CPU SGEI interrupt */
573
csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
574
disable_percpu_irq(hgei_parent_irq);
575
576
csr_write(CSR_HVICTL, aia_hvictl_value(false));
577
578
raw_spin_lock_irqsave(&hgctrl->lock, flags);
579
580
for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
581
vcpu = hgctrl->owners[i];
582
if (!vcpu)
583
continue;
584
585
/*
586
* We release hgctrl->lock before notifying IMSIC
587
* so that we don't have lock ordering issues.
588
*/
589
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
590
591
/* Notify IMSIC */
592
kvm_riscv_vcpu_aia_imsic_release(vcpu);
593
594
/*
595
* Wakeup VCPU if it was blocked so that it can
596
* run on other HARTs
597
*/
598
if (csr_read(CSR_HGEIE) & BIT(i)) {
599
csr_clear(CSR_HGEIE, BIT(i));
600
kvm_vcpu_kick(vcpu);
601
}
602
603
raw_spin_lock_irqsave(&hgctrl->lock, flags);
604
}
605
606
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
607
608
put_cpu_ptr(&aia_hgei);
609
}
610
611
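/*
 * One-time AIA initialization: probe the usable width of HGEIE by
 * writing all-ones and reading it back, clamp the result to the number
 * of per-hart IMSIC guest files, then register the AIA device ops and
 * flip the availability static key.
 */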
int kvm_riscv_aia_init(void)
{
	int rc;
	const struct imsic_global_config *gc;

	if (!riscv_isa_extension_available(NULL, SxAIA))
		return -ENODEV;
	gc = imsic_get_global_config();

	/* Figure out the number of bits in HGEIE */
	csr_write(CSR_HGEIE, -1UL);
	kvm_riscv_aia_nr_hgei = fls_long(csr_read(CSR_HGEIE));
	csr_write(CSR_HGEIE, 0);
	if (kvm_riscv_aia_nr_hgei)
		kvm_riscv_aia_nr_hgei--;

	/*
	 * The number of usable HGEI lines should be the minimum of the
	 * per-HART IMSIC guest files and the number of bits in HGEIE.
	 */
	if (gc)
		kvm_riscv_aia_nr_hgei = min((ulong)kvm_riscv_aia_nr_hgei,
					    BIT(gc->guest_index_bits) - 1);
	else
		kvm_riscv_aia_nr_hgei = 0;

	/* Find the number of guest MSI IDs */
	kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
	if (gc && kvm_riscv_aia_nr_hgei)
		kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1;

	/* Initialize guest external interrupt line management */
	rc = aia_hgei_init();
	if (rc)
		return rc;

	/* Register device operations */
	rc = kvm_register_device_ops(&kvm_riscv_aia_device_ops,
				     KVM_DEV_TYPE_RISCV_AIA);
	if (rc) {
		aia_hgei_exit();
		return rc;
	}

	/* Enable KVM AIA support */
	static_branch_enable(&kvm_riscv_aia_available);

	return 0;
}

void kvm_riscv_aia_exit(void)
{
	if (!kvm_riscv_aia_available())
		return;

	/* Unregister device operations */
	kvm_unregister_device_ops(KVM_DEV_TYPE_RISCV_AIA);

	/* Clean up the HGEI state */
	aia_hgei_exit();
}