GitHub Repository: torvalds/linux
Path: blob/master/arch/riscv/kvm/aia_aplic.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <[email protected]>
 */

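/*
 * Editorial note: in-kernel KVM emulation of a RISC-V APLIC (Advanced
 * Platform-Level Interrupt Controller) domain. The domain operates in
 * MSI delivery mode (domaincfg reads back with DM set below), so wired
 * interrupt sources tracked here are forwarded to guest harts as IMSIC
 * MSIs via kvm_riscv_aia_inject_msi_by_id().
 */
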
#include <linux/irqchip/riscv-aplic.h>
#include <linux/kvm_host.h>
#include <linux/math.h>
#include <linux/spinlock.h>
#include <linux/swab.h>
#include <kvm/iodev.h>

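/*
 * Per-source state. The state field packs the pending and enabled bits
 * together with the latched raw input level of the wire in bit 8.
 */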
struct aplic_irq {
	raw_spinlock_t lock;
	u32 sourcecfg;
	u32 state;
#define APLIC_IRQ_STATE_PENDING	BIT(0)
#define APLIC_IRQ_STATE_ENABLED	BIT(1)
#define APLIC_IRQ_STATE_ENPEND	(APLIC_IRQ_STATE_PENDING | \
				 APLIC_IRQ_STATE_ENABLED)
#define APLIC_IRQ_STATE_INPUT	BIT(8)
	u32 target;
};

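/*
 * Per-VM APLIC domain state. Source numbering starts at 1, so nr_irqs
 * is the number of sources plus one and index 0 is unused; nr_words is
 * the number of 32-bit words covered by the setip/setie style bitmap
 * registers.
 */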
struct aplic {
	struct kvm_io_device iodev;

	u32 domaincfg;
	u32 genmsi;

	u32 nr_irqs;
	u32 nr_words;
	struct aplic_irq *irqs;
};

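/*
 * Read back sourcecfg for a source number, returning 0 for the invalid
 * source 0 or out-of-range sources. The per-IRQ raw spinlock makes the
 * read atomic w.r.t. concurrent writers.
 */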
static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq)
{
	u32 ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return 0;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = irqd->sourcecfg;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

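/*
 * Update sourcecfg. Delegation (the D bit) is not supported by this
 * emulated domain, so a write with D set clears the register;
 * otherwise only the source-mode (SM) field is kept.
 */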
static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	if (val & APLIC_SOURCECFG_D)
		val = 0;
	else
		val &= APLIC_SOURCECFG_SM_MASK;

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->sourcecfg = val;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

static u32 aplic_read_target(struct aplic *aplic, u32 irq)
{
	u32 ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return 0;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = irqd->target;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

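/*
 * Update target. Only the EIID, hart-index, and guest-index fields are
 * writable; all other bits are masked off before the value is stored.
 */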
static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	val &= APLIC_TARGET_EIID_MASK |
	       (APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
	       (APLIC_TARGET_GUEST_IDX_MASK << APLIC_TARGET_GUEST_IDX_SHIFT);

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->target = val;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

static bool aplic_read_pending(struct aplic *aplic, u32 irq)
{
	bool ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

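/*
 * Set or clear the pending bit, honouring source-mode rules: an
 * inactive source never changes, and for level-triggered sources a
 * request to *set* pending is ignored unless the latched input is
 * currently at the active level (clearing is always allowed).
 */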
static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
{
	unsigned long flags, sm;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);

	sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip_write_pending;

	if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
	    sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
		if (!pending)
			goto noskip_write_pending;
		if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
			goto skip_write_pending;
		if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
			goto skip_write_pending;
	}

noskip_write_pending:
	if (pending)
		irqd->state |= APLIC_IRQ_STATE_PENDING;
	else
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;

skip_write_pending:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

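/* Plain locked accessors for the per-source enable bit. */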
static bool aplic_read_enabled(struct aplic *aplic, u32 irq)
{
	bool ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	if (enabled)
		irqd->state |= APLIC_IRQ_STATE_ENABLED;
	else
		irqd->state &= ~APLIC_IRQ_STATE_ENABLED;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

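/*
 * Compute the rectified input value: the latched raw input XORed with
 * the source's inversion (active-low level or falling edge), forced to
 * 0 for delegated or inactive sources. This backs reads of the
 * in_clrip registers.
 */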
static bool aplic_read_input(struct aplic *aplic, u32 irq)
{
	u32 sourcecfg, sm, raw_input, irq_inverted;
	struct aplic_irq *irqd;
	unsigned long flags;
	bool ret = false;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);

	sourcecfg = irqd->sourcecfg;
	if (sourcecfg & APLIC_SOURCECFG_D)
		goto skip;

	sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip;

	raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0;
	irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
			sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
	ret = !!(raw_input ^ irq_inverted);

skip:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

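/*
 * Decode a target register into hart index, guest index, and EIID,
 * then forward the interrupt as an IMSIC MSI.
 */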
static void aplic_inject_msi(struct kvm *kvm, u32 irq, u32 target)
{
	u32 hart_idx, guest_idx, eiid;

	hart_idx = target >> APLIC_TARGET_HART_IDX_SHIFT;
	hart_idx &= APLIC_TARGET_HART_IDX_MASK;
	guest_idx = target >> APLIC_TARGET_GUEST_IDX_SHIFT;
	guest_idx &= APLIC_TARGET_GUEST_IDX_MASK;
	eiid = target & APLIC_TARGET_EIID_MASK;
	kvm_riscv_aia_inject_msi_by_id(kvm, hart_idx, guest_idx, eiid);
}

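/*
 * Re-evaluate a range of sources and deliver an MSI for every one that
 * is both enabled and pending, clearing the pending bit as the
 * interrupt is forwarded. Nothing is delivered while domain interrupts
 * (domaincfg.IE) are disabled.
 */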
static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last)
{
	bool inject;
	u32 irq, target;
	unsigned long flags;
	struct aplic_irq *irqd;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!(aplic->domaincfg & APLIC_DOMAINCFG_IE))
		return;

	for (irq = first; irq <= last; irq++) {
		if (!irq || aplic->nr_irqs <= irq)
			continue;
		irqd = &aplic->irqs[irq];

		raw_spin_lock_irqsave(&irqd->lock, flags);

		inject = false;
		target = irqd->target;
		if ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
		    APLIC_IRQ_STATE_ENPEND) {
			irqd->state &= ~APLIC_IRQ_STATE_PENDING;
			inject = true;
		}

		raw_spin_unlock_irqrestore(&irqd->lock, flags);

		if (inject)
			aplic_inject_msi(kvm, irq, target);
	}
}

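/*
 * Inject a wired interrupt line change into the emulated APLIC. The
 * new line level is latched into the INPUT bit, the pending bit is set
 * according to the configured source mode (rising/falling edge or
 * high/low level), and if the source ends up enabled and pending with
 * domain interrupts on, the MSI is delivered at once.
 */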
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level)
{
	u32 target;
	bool inject = false, ie;
	unsigned long flags;
	struct aplic_irq *irqd;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic || !source || (aplic->nr_irqs <= source))
		return -ENODEV;
	irqd = &aplic->irqs[source];
	ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;

	raw_spin_lock_irqsave(&irqd->lock, flags);

	if (irqd->sourcecfg & APLIC_SOURCECFG_D)
		goto skip_unlock;

	switch (irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK) {
	case APLIC_SOURCECFG_SM_EDGE_RISE:
		if (level && !(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_EDGE_FALL:
		if (!level && (irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_HIGH:
		if (level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_LOW:
		if (!level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	}

	if (level)
		irqd->state |= APLIC_IRQ_STATE_INPUT;
	else
		irqd->state &= ~APLIC_IRQ_STATE_INPUT;

	target = irqd->target;
	if (ie && ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
		   APLIC_IRQ_STATE_ENPEND)) {
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;
		inject = true;
	}

skip_unlock:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	if (inject)
		aplic_inject_msi(kvm, source, target);

	return 0;
}

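/*
 * Helpers for the 32-sources-per-word bitmap registers (setip,
 * in_clrip, setie, clrie): gather or scatter one 32-bit word of
 * per-source bits using the single-bit accessors above.
 */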
static u32 aplic_read_input_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_input(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static u32 aplic_read_pending_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_pending(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static void aplic_write_pending_word(struct aplic *aplic, u32 word,
				     u32 val, bool pending)
{
	u32 i;

	for (i = 0; i < 32; i++) {
		if (val & BIT(i))
			aplic_write_pending(aplic, word * 32 + i, pending);
	}
}

static u32 aplic_read_enabled_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_enabled(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static void aplic_write_enabled_word(struct aplic *aplic, u32 word,
				     u32 val, bool enabled)
{
	u32 i;

	for (i = 0; i < 32; i++) {
		if (val & BIT(i))
			aplic_write_enabled(aplic, word * 32 + i, enabled);
	}
}

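/*
 * Dispatch a 32-bit read of the APLIC register map by offset. Reads of
 * domaincfg always report the read-only bits and DM=1 (MSI delivery
 * mode); write-only registers such as setipnum/clripnum read as zero;
 * unaligned accesses return -EOPNOTSUPP and unmapped offsets -ENODEV.
 */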
static int aplic_mmio_read_offset(struct kvm *kvm, gpa_t off, u32 *val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		*val32 = APLIC_DOMAINCFG_RDONLY |
			 aplic->domaincfg | APLIC_DOMAINCFG_DM;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		   (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		*val32 = aplic_read_sourcecfg(aplic, i);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		*val32 = aplic_read_pending_word(aplic, i);
	} else if (off == APLIC_SETIPNUM) {
		*val32 = 0;
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIP_BASE) >> 2;
		*val32 = aplic_read_input_word(aplic, i);
	} else if (off == APLIC_CLRIPNUM) {
		*val32 = 0;
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		*val32 = aplic_read_enabled_word(aplic, i);
	} else if (off == APLIC_SETIENUM) {
		*val32 = 0;
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		*val32 = 0;
	} else if (off == APLIC_CLRIENUM) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_LE) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_BE) {
		*val32 = 0;
	} else if (off == APLIC_GENMSI) {
		*val32 = aplic->genmsi;
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		*val32 = aplic_read_target(aplic, i);
	} else
		return -ENODEV;

	return 0;
}

static int aplic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			   gpa_t addr, int len, void *val)
{
	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_read_offset(vcpu->kvm,
				      addr - vcpu->kvm->arch.aia.aplic_addr,
				      val);
}

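/*
 * Dispatch a 32-bit write of the APLIC register map by offset. A
 * genmsi write bypasses per-source state and injects the MSI directly.
 * Every successful write ends with a rescan of all sources, since the
 * write may have made an enabled source pending.
 */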
static int aplic_mmio_write_offset(struct kvm *kvm, gpa_t off, u32 val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		/* Only IE bit writeable */
		aplic->domaincfg = val32 & APLIC_DOMAINCFG_IE;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		   (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		aplic_write_sourcecfg(aplic, i, val32);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIPNUM) {
		aplic_write_pending(aplic, val32, true);
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIPNUM) {
		aplic_write_pending(aplic, val32, false);
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIENUM) {
		aplic_write_enabled(aplic, val32, true);
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIENUM) {
		aplic_write_enabled(aplic, val32, false);
	} else if (off == APLIC_SETIPNUM_LE) {
		aplic_write_pending(aplic, val32, true);
	} else if (off == APLIC_SETIPNUM_BE) {
		aplic_write_pending(aplic, __swab32(val32), true);
	} else if (off == APLIC_GENMSI) {
		aplic->genmsi = val32 & ~(APLIC_TARGET_GUEST_IDX_MASK <<
					  APLIC_TARGET_GUEST_IDX_SHIFT);
		kvm_riscv_aia_inject_msi_by_id(kvm,
					       val32 >> APLIC_TARGET_HART_IDX_SHIFT, 0,
					       val32 & APLIC_TARGET_EIID_MASK);
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		aplic_write_target(aplic, i, val32);
	} else
		return -ENODEV;

	aplic_update_irq_range(kvm, 1, aplic->nr_irqs - 1);

	return 0;
}

static int aplic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, const void *val)
{
	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_write_offset(vcpu->kvm,
				       addr - vcpu->kvm->arch.aia.aplic_addr,
				       *((const u32 *)val));
}

static struct kvm_io_device_ops aplic_iodoev_ops = {
	.read = aplic_mmio_read,
	.write = aplic_mmio_write,
};

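/*
 * The KVM device-attribute interface for the APLIC reuses the MMIO
 * register offsets as attribute types, so get/set/has simply map to
 * the offset-based read and write handlers above.
 */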
int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v)
{
	int rc;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_write_offset(kvm, type, v);
	if (rc)
		return rc;

	return 0;
}

int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v)
{
	int rc;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_read_offset(kvm, type, v);
	if (rc)
		return rc;

	return 0;
}

int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type)
{
	int rc;
	u32 val;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_read_offset(kvm, type, &val);
	if (rc)
		return rc;

	return 0;
}

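/*
 * Allocate and register the in-kernel APLIC for a VM: one aplic_irq
 * slot per source (plus the unused slot 0), an MMIO device at the
 * guest physical address chosen by userspace, and the default IRQ
 * routing table.
 */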
int kvm_riscv_aia_aplic_init(struct kvm *kvm)
{
	int i, ret = 0;
	struct aplic *aplic;

	/* Do nothing if we have zero sources */
	if (!kvm->arch.aia.nr_sources)
		return 0;

	/* Allocate APLIC global state */
	aplic = kzalloc(sizeof(*aplic), GFP_KERNEL);
	if (!aplic)
		return -ENOMEM;
	kvm->arch.aia.aplic_state = aplic;

	/* Setup APLIC IRQs */
	aplic->nr_irqs = kvm->arch.aia.nr_sources + 1;
	aplic->nr_words = DIV_ROUND_UP(aplic->nr_irqs, 32);
	aplic->irqs = kcalloc(aplic->nr_irqs,
			      sizeof(*aplic->irqs), GFP_KERNEL);
	if (!aplic->irqs) {
		ret = -ENOMEM;
		goto fail_free_aplic;
	}
	for (i = 0; i < aplic->nr_irqs; i++)
		raw_spin_lock_init(&aplic->irqs[i].lock);

	/* Setup IO device */
	kvm_iodevice_init(&aplic->iodev, &aplic_iodoev_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
				      kvm->arch.aia.aplic_addr,
				      KVM_DEV_RISCV_APLIC_SIZE,
				      &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
	if (ret)
		goto fail_free_aplic_irqs;

	/* Setup default IRQ routing */
	ret = kvm_riscv_setup_default_irq_routing(kvm, aplic->nr_irqs);
	if (ret)
		goto fail_unreg_iodev;

	return 0;

fail_unreg_iodev:
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
fail_free_aplic_irqs:
	kfree(aplic->irqs);
fail_free_aplic:
	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
	return ret;
}

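/*
 * Tear down the in-kernel APLIC: unregister the MMIO device and free
 * the per-source array and domain state.
 */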
void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm)
{
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic)
		return;

	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);

	kfree(aplic->irqs);

	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
}