GitHub Repository: torvalds/linux
Path: blob/master/arch/riscv/kvm/aia_device.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <[email protected]>
 */

#include <linux/bits.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>

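/*
 * Create the in-kernel AIA irqchip. Fails with -EEXIST if an in-kernel
 * irqchip already exists, and with -EBUSY if the VCPUs cannot all be
 * locked or if any VCPU has already run, since the irqchip must exist
 * before the guest starts executing.
 */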
static int aia_create(struct kvm_device *dev, u32 type)
{
	int ret;
	unsigned long i;
	struct kvm *kvm = dev->kvm;
	struct kvm_vcpu *vcpu;

	if (irqchip_in_kernel(kvm))
		return -EEXIST;

	ret = -EBUSY;
	if (kvm_trylock_all_vcpus(kvm))
		return ret;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.ran_atleast_once)
			goto out_unlock;
	}
	ret = 0;

	kvm->arch.aia.in_kernel = true;

out_unlock:
	kvm_unlock_all_vcpus(kvm);
	return ret;
}

static void aia_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

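/*
 * Read or write a single AIA configuration parameter: the mode, the
 * number of interrupt IDs, the number of wired sources, or the
 * group/hart/guest bit layout of the IMSIC address space. Writes are
 * rejected with -EBUSY once the irqchip has been initialized.
 */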
static int aia_config(struct kvm *kvm, unsigned long type,
		      u32 *nr, bool write)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	/* Writes can only be done before irqchip is initialized */
	if (write && kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	switch (type) {
	case KVM_DEV_RISCV_AIA_CONFIG_MODE:
		if (write) {
			switch (*nr) {
			case KVM_DEV_RISCV_AIA_MODE_EMUL:
				break;
			case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
			case KVM_DEV_RISCV_AIA_MODE_AUTO:
				/*
				 * HW-accelerated and Auto modes are only
				 * supported on hosts with a non-zero number
				 * of guest external interrupts (i.e.
				 * non-zero VS-level IMSIC pages).
				 */
				if (!kvm_riscv_aia_nr_hgei)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
			aia->mode = *nr;
		} else
			*nr = aia->mode;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_IDS:
		if (write) {
			if ((*nr < KVM_DEV_RISCV_AIA_IDS_MIN) ||
			    (*nr >= KVM_DEV_RISCV_AIA_IDS_MAX) ||
			    ((*nr & KVM_DEV_RISCV_AIA_IDS_MIN) !=
			     KVM_DEV_RISCV_AIA_IDS_MIN) ||
			    (kvm_riscv_aia_max_ids <= *nr))
				return -EINVAL;
			aia->nr_ids = *nr;
		} else
			*nr = aia->nr_ids;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
		if (write) {
			if ((*nr >= KVM_DEV_RISCV_AIA_SRCS_MAX) ||
			    (*nr >= kvm_riscv_aia_max_ids))
				return -EINVAL;
			aia->nr_sources = *nr;
		} else
			*nr = aia->nr_sources;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_GROUP_BITS_MAX)
				return -EINVAL;
			aia->nr_group_bits = *nr;
		} else
			*nr = aia->nr_group_bits;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
		if (write) {
			if ((*nr < KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN) ||
			    (*nr >= KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX))
				return -EINVAL;
			aia->nr_group_shift = *nr;
		} else
			*nr = aia->nr_group_shift;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_HART_BITS_MAX)
				return -EINVAL;
			aia->nr_hart_bits = *nr;
		} else
			*nr = aia->nr_hart_bits;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_GUEST_BITS_MAX)
				return -EINVAL;
			aia->nr_guest_bits = *nr;
		} else
			*nr = aia->nr_guest_bits;
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

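/* Read or write the guest physical base address of the APLIC. */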
static int aia_aplic_addr(struct kvm *kvm, u64 *addr, bool write)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	if (write) {
		/* Writes can only be done before irqchip is initialized */
		if (kvm_riscv_aia_initialized(kvm))
			return -EBUSY;

		if (*addr & (KVM_DEV_RISCV_APLIC_ALIGN - 1))
			return -EINVAL;

		aia->aplic_addr = *addr;
	} else
		*addr = aia->aplic_addr;

	return 0;
}

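/* Read or write the guest physical base address of one VCPU's IMSIC. */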
static int aia_imsic_addr(struct kvm *kvm, u64 *addr,
			  unsigned long vcpu_idx, bool write)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_aia *vcpu_aia;

	vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	if (!vcpu)
		return -EINVAL;
	vcpu_aia = &vcpu->arch.aia_context;

	if (write) {
		/* Writes can only be done before irqchip is initialized */
		if (kvm_riscv_aia_initialized(kvm))
			return -EBUSY;

		if (*addr & (KVM_DEV_RISCV_IMSIC_ALIGN - 1))
			return -EINVAL;
	}

	mutex_lock(&vcpu->mutex);
	if (write)
		vcpu_aia->imsic_addr = *addr;
	else
		*addr = vcpu_aia->imsic_addr;
	mutex_unlock(&vcpu->mutex);

	return 0;
}

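/*
 * Compute the common base PPN of an IMSIC address by masking out the
 * group, hart, and guest index fields; every IMSIC in the VM must share
 * this base PPN (see aia_init() below).
 */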
static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
{
	u32 h, l;
	gpa_t mask = 0;

	h = aia->nr_hart_bits + aia->nr_guest_bits +
	    IMSIC_MMIO_PAGE_SHIFT - 1;
	mask = GENMASK_ULL(h, 0);

	if (aia->nr_group_bits) {
		h = aia->nr_group_bits + aia->nr_group_shift - 1;
		l = aia->nr_group_shift;
		mask |= GENMASK_ULL(h, l);
	}

	return (addr & ~mask) >> IMSIC_MMIO_PAGE_SHIFT;
}

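/*
 * Extract the hart index encoded in an IMSIC address: the group bits
 * (if any) form the upper part of the index and the hart bits the
 * lower part.
 */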
static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
	u32 hart = 0, group = 0;

	if (aia->nr_hart_bits)
		hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
		       GENMASK_ULL(aia->nr_hart_bits - 1, 0);
	if (aia->nr_group_bits)
		group = (addr >> aia->nr_group_shift) &
			GENMASK_ULL(aia->nr_group_bits - 1, 0);

	return (group << aia->nr_hart_bits) | hart;
}

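/*
 * Validate the configuration chosen by user-space and bring up the
 * in-kernel irqchip: initialize the APLIC, then the per-VCPU IMSICs,
 * unwinding any partially initialized IMSICs on failure.
 */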
static int aia_init(struct kvm *kvm)
{
	int ret, i;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_aia *vaia;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t base_ppn = KVM_RISCV_AIA_UNDEF_ADDR;

	/* Irqchip can be initialized only once */
	if (kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Bail if we might be in the middle of creating a VCPU */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* Number of sources should be less than or equal to number of IDs */
	if (aia->nr_ids < aia->nr_sources)
		return -EINVAL;

	/* APLIC base is required for a non-zero number of sources */
	if (aia->nr_sources && aia->aplic_addr == KVM_RISCV_AIA_UNDEF_ADDR)
		return -EINVAL;

	/* Initialize APLIC */
	ret = kvm_riscv_aia_aplic_init(kvm);
	if (ret)
		return ret;

	/* Iterate over each VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		vaia = &vcpu->arch.aia_context;

		/* IMSIC base is required */
		if (vaia->imsic_addr == KVM_RISCV_AIA_UNDEF_ADDR) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* All IMSICs should have matching base PPN */
		if (base_ppn == KVM_RISCV_AIA_UNDEF_ADDR)
			base_ppn = aia_imsic_ppn(aia, vaia->imsic_addr);
		if (base_ppn != aia_imsic_ppn(aia, vaia->imsic_addr)) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* Update HART index of the IMSIC based on IMSIC base */
		vaia->hart_index = aia_imsic_hart_index(aia,
							vaia->imsic_addr);

		/* Initialize IMSIC for this VCPU */
		ret = kvm_riscv_vcpu_aia_imsic_init(vcpu);
		if (ret)
			goto fail_cleanup_imsics;
	}

	/* Set the initialized flag */
	kvm->arch.aia.initialized = true;

	return 0;

fail_cleanup_imsics:
	for (i = idx - 1; i >= 0; i--) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
	}
	kvm_riscv_aia_aplic_cleanup(kvm);
	return ret;
}

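/*
 * Handler for KVM_SET_DEVICE_ATTR on the AIA device. User-space reaches
 * it through the generic KVM device API, e.g. (a sketch, with error
 * handling omitted):
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_RISCV_AIA };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 *	u32 mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_RISCV_AIA_GRP_CONFIG,
 *		.attr  = KVM_DEV_RISCV_AIA_CONFIG_MODE,
 *		.addr  = (u64)&mode,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */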
static int aia_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	unsigned long v, type = (unsigned long)attr->attr;
	void __user *uaddr = (void __user *)(long)attr->addr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, true);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			r = aia_imsic_addr(dev->kvm, &addr,
			       type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_CTRL:
		switch (type) {
		case KVM_DEV_RISCV_AIA_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = aia_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			break;
		}

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_set_attr(dev->kvm, type, nr);
		mutex_unlock(&dev->kvm->lock);

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, true, &v);
		mutex_unlock(&dev->kvm->lock);

		break;
	}

	return r;
}

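/*
 * Handler for KVM_GET_DEVICE_ATTR on the AIA device: mirrors
 * aia_set_attr() but copies the requested value back to user-space.
 */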
static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	void __user *uaddr = (void __user *)(long)attr->addr;
	unsigned long v, type = (unsigned long)attr->attr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, false);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			r = aia_imsic_addr(dev->kvm, &addr,
			       type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_get_attr(dev->kvm, type, &nr);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, false, &v);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &v, sizeof(v)))
			return -EFAULT;

		break;
	}

	return r;
}

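/*
 * Handler for KVM_HAS_DEVICE_ATTR: returns 0 for every attribute the
 * set/get handlers above support, -ENXIO otherwise.
 */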
static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int nr_vcpus;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		switch (attr->attr) {
		case KVM_DEV_RISCV_AIA_CONFIG_MODE:
		case KVM_DEV_RISCV_AIA_CONFIG_IDS:
		case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
		case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
		case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
		case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
		case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
			return 0;
		}
		break;
	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		if (attr->attr == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			return 0;
		else if (attr->attr < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			return 0;
		break;
	case KVM_DEV_RISCV_AIA_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_RISCV_AIA_CTRL_INIT:
			return 0;
		}
		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
	}

	return -ENXIO;
}

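/* Device ops registered with the generic KVM device framework. */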
struct kvm_device_ops kvm_riscv_aia_device_ops = {
	.name = "kvm-riscv-aia",
	.create = aia_create,
	.destroy = aia_destroy,
	.set_attr = aia_set_attr,
	.get_attr = aia_get_attr,
	.has_attr = aia_has_attr,
};

int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return 1;

	/* Update the IMSIC HW state before entering guest mode */
	return kvm_riscv_vcpu_aia_imsic_update(vcpu);
}

void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (!kvm_riscv_aia_available())
		return;
	memset(csr, 0, sizeof(*csr));

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return;

	/* Reset the IMSIC context */
	kvm_riscv_vcpu_aia_imsic_reset(vcpu);
}

void kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;

	if (!kvm_riscv_aia_available())
		return;

	/*
	 * We don't do any memory allocations here because these will be
	 * done after the AIA device is initialized by user-space.
	 *
	 * Refer to the aia_init() implementation for more details.
	 */

	/* Initialize default values in AIA vcpu context */
	vaia->imsic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
	vaia->hart_index = vcpu->vcpu_idx;
}

void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return;

	/* Cleanup IMSIC context */
	kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
}

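/*
 * Inject an MSI with interrupt identity @iid into IMSIC guest file
 * @guest_index of the VCPU whose hart index matches @hart_index.
 */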
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
				   u32 guest_index, u32 iid)
{
	unsigned long idx;
	struct kvm_vcpu *vcpu;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Inject MSI to matching VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		if (vcpu->arch.aia_context.hart_index == hart_index)
			return kvm_riscv_vcpu_aia_imsic_inject(vcpu,
							       guest_index,
							       0, iid);
	}

	return 0;
}

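/*
 * Inject an MSI by guest physical address: decode the target IMSIC page
 * and guest file index from the MSI address, then deliver the message
 * to the VCPU whose IMSIC occupies that page.
 */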
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	gpa_t tppn, ippn;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	u32 g, toff, iid = msi->data;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t target = (((gpa_t)msi->address_hi) << 32) | msi->address_lo;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Convert target address to target PPN */
	tppn = target >> IMSIC_MMIO_PAGE_SHIFT;

	/* Extract and clear Guest ID from target PPN */
	g = tppn & (BIT(aia->nr_guest_bits) - 1);
	tppn &= ~((gpa_t)(BIT(aia->nr_guest_bits) - 1));

	/* Inject MSI to matching VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		ippn = vcpu->arch.aia_context.imsic_addr >>
		       IMSIC_MMIO_PAGE_SHIFT;
		if (ippn == tppn) {
			toff = target & (IMSIC_MMIO_PAGE_SZ - 1);
			return kvm_riscv_vcpu_aia_imsic_inject(vcpu, g,
							       toff, iid);
		}
	}

	return 0;
}

int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Inject interrupt level change in APLIC */
	return kvm_riscv_aia_aplic_inject(kvm, irq, level);
}

void kvm_riscv_aia_init_vm(struct kvm *kvm)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	if (!kvm_riscv_aia_available())
		return;

	/*
	 * We don't do any memory allocations here because these will be
	 * done after the AIA device is initialized by user-space.
	 *
	 * Refer to the aia_init() implementation for more details.
	 */

	/* Initialize default values in AIA global context */
	aia->mode = (kvm_riscv_aia_nr_hgei) ?
		KVM_DEV_RISCV_AIA_MODE_AUTO : KVM_DEV_RISCV_AIA_MODE_EMUL;
	aia->nr_ids = kvm_riscv_aia_max_ids - 1;
	aia->nr_sources = 0;
	aia->nr_group_bits = 0;
	aia->nr_group_shift = KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN;
	aia->nr_hart_bits = 0;
	aia->nr_guest_bits = 0;
	aia->aplic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
}

void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return;

	/* Cleanup APLIC context */
	kvm_riscv_aia_aplic_cleanup(kvm);
}