Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/kvm/intc/eiointc.c
52674 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Copyright (C) 2024 Loongson Technology Corporation Limited
4
*/
5
6
#include <asm/kvm_eiointc.h>
7
#include <asm/kvm_vcpu.h>
8
#include <linux/count_zeros.h>
9
10
/*
 * Rebuild the per-parent-IP software pending bitmaps (sw_coreisr) from the
 * architectural per-cpu coreisr state, e.g. after register state has been
 * restored by userspace.
 */
static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
{
	int ipnum, cpu, cpuid, irq;
	struct kvm_vcpu *vcpu;

	for (irq = 0; irq < EIOINTC_IRQS; irq++) {
		/* Each byte of ipmap routes a group of 32 irqs to a parent IP. */
		ipnum = (s->ipmap >> (irq / 32 * 8)) & 0xff;
		if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
			/* Bitmap mode: lowest set bit selects the IP; clamp to IP0..IP3. */
			ipnum = count_trailing_zeros(ipnum);
			ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
		}

		/* coremap holds one destination-cpuid byte per irq. */
		cpuid = ((u8 *)s->coremap)[irq];
		vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
		if (!vcpu)
			continue;

		cpu = vcpu->vcpu_id;
		if (test_bit(irq, (unsigned long *)s->coreisr[cpu]))
			__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
		else
			__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
	}
}
34
35
/*
 * Propagate a level change on @irq to the destination vcpu's parent
 * CPU interrupt line (INT_HWI0 + ipnum).
 *
 * sw_coreisr[cpu][ipnum] tracks which irqs are pending per parent IP;
 * the parent line is only (de)asserted when that bitmap transitions
 * between empty and non-empty, so an already-asserted line is not
 * re-toggled. Caller must hold s->lock.
 */
static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
{
	int ipnum, cpu, found;
	struct kvm_vcpu *vcpu;
	struct kvm_interrupt vcpu_irq;

	ipnum = (s->ipmap >> (irq / 32 * 8)) & 0xff;
	if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
		/* Bitmap mode: convert the IP mask to an index, default to IP0. */
		ipnum = count_trailing_zeros(ipnum);
		ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
	}

	/* sw_coremap caches the resolved destination vcpu_id for each irq. */
	cpu = s->sw_coremap[irq];
	vcpu = kvm_get_vcpu_by_id(s->kvm, cpu);
	if (unlikely(vcpu == NULL)) {
		kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
		return;
	}

	if (level) {
		/* if not enable return false */
		if (!test_bit(irq, (unsigned long *)s->enable))
			return;
		__set_bit(irq, (unsigned long *)s->coreisr[cpu]);
		/*
		 * Sample emptiness BEFORE adding this irq: a non-empty bitmap
		 * means the parent line is already raised.
		 */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
		__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
	} else {
		__clear_bit(irq, (unsigned long *)s->coreisr[cpu]);
		__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
		/*
		 * Sample AFTER removing this irq: a non-empty bitmap means the
		 * parent line must stay asserted.
		 */
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
	}

	if (found < EIOINTC_IRQS)
		return; /* other irq is handling, needn't update parent irq */

	/* A negative irq number requests deassertion of the line. */
	vcpu_irq.irq = level ? (INT_HWI0 + ipnum) : -(INT_HWI0 + ipnum);
	kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq);
}
73
74
/*
 * Decode @len destination-cpuid bytes of a coremap write starting at @irq
 * and refresh the sw_coremap cache (irq -> destination vcpu_id). When
 * @notify is set, an irq that is currently pending in isr is migrated by
 * lowering it on the old cpu and re-raising it on the new one.
 * Caller must hold s->lock.
 */
static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
					int irq, u64 val, u32 len, bool notify)
{
	int i, cpu, cpuid;
	struct kvm_vcpu *vcpu;

	for (i = 0; i < len; i++) {
		/* One byte of the written value per irq, lowest byte first. */
		cpuid = val & 0xff;
		val = val >> 8;

		if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
			/* Bitmap mode: lowest set bit selects the cpu; out of range -> cpu 0. */
			cpuid = ffs(cpuid) - 1;
			cpuid = (cpuid >= 4) ? 0 : cpuid;
		}

		vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
		if (!vcpu)
			continue;

		cpu = vcpu->vcpu_id;
		if (s->sw_coremap[irq + i] == cpu)
			continue;

		if (notify && test_bit(irq + i, (unsigned long *)s->isr)) {
			/* lower irq at old cpu and raise irq at new cpu */
			eiointc_update_irq(s, irq + i, 0);
			s->sw_coremap[irq + i] = cpu;
			eiointc_update_irq(s, irq + i, 1);
		} else {
			s->sw_coremap[irq + i] = cpu;
		}
	}
}
107
108
void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
109
{
110
unsigned long flags;
111
unsigned long *isr = (unsigned long *)s->isr;
112
113
spin_lock_irqsave(&s->lock, flags);
114
level ? __set_bit(irq, isr) : __clear_bit(irq, isr);
115
eiointc_update_irq(s, irq, level);
116
spin_unlock_irqrestore(&s->lock, flags);
117
}
118
119
/*
 * Read one aligned 64-bit register from the emulated EIOINTC register
 * space. @addr must be 8-byte aligned; the caller extracts narrower
 * accesses from the returned value. Caller must hold s->lock.
 * Always returns 0; unimplemented offsets read as zero.
 */
static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
				gpa_t addr, unsigned long *val)
{
	int index;
	u64 data = 0;
	gpa_t offset;

	offset = addr - EIOINTC_BASE;
	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 3;
		data = s->nodetype[index];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/* ipmap is a single 64-bit register; index is computed but unused. */
		index = (offset - EIOINTC_IPMAP_START) >> 3;
		data = s->ipmap;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 3;
		data = s->enable[index];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		index = (offset - EIOINTC_BOUNCE_START) >> 3;
		data = s->bounce[index];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		/* coreisr is banked per cpu; a read returns the accessing vcpu's bank. */
		index = (offset - EIOINTC_COREISR_START) >> 3;
		data = s->coreisr[vcpu->vcpu_id][index];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		index = (offset - EIOINTC_COREMAP_START) >> 3;
		data = s->coremap[index];
		break;
	default:
		break;
	}
	*val = data;

	return 0;
}
159
160
static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
161
struct kvm_io_device *dev,
162
gpa_t addr, int len, void *val)
163
{
164
unsigned long flags, data, offset;
165
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
166
167
if (!eiointc) {
168
kvm_err("%s: eiointc irqchip not valid!\n", __func__);
169
return 0;
170
}
171
172
if (addr & (len - 1)) {
173
kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
174
return 0;
175
}
176
177
offset = addr & 0x7;
178
addr -= offset;
179
vcpu->stat.eiointc_read_exits++;
180
spin_lock_irqsave(&eiointc->lock, flags);
181
loongarch_eiointc_read(vcpu, eiointc, addr, &data);
182
spin_unlock_irqrestore(&eiointc->lock, flags);
183
184
data = data >> (offset * 8);
185
switch (len) {
186
case 1:
187
*(long *)val = (s8)data;
188
break;
189
case 2:
190
*(long *)val = (s16)data;
191
break;
192
case 4:
193
*(long *)val = (s32)data;
194
break;
195
default:
196
*(long *)val = (long)data;
197
break;
198
}
199
200
return 0;
201
}
202
203
/*
 * Apply a write of @value to the emulated register space. @field_mask is
 * a width mask for the access (0xFF / USHRT_MAX / UINT_MAX / ULONG_MAX);
 * value and mask are shifted into position so that only the touched bytes
 * of the containing aligned 64-bit register are modified. Interrupt level
 * changes caused by the write are propagated. Caller must hold s->lock.
 */
static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, u64 value, u64 field_mask)
{
	int index, irq;
	u8 cpu;
	u64 data, old, mask;
	gpa_t offset;

	/* Position the written bytes inside the aligned 64-bit register. */
	offset = addr & 7;
	mask = field_mask << (offset * 8);
	data = (value & field_mask) << (offset * 8);

	addr -= offset;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 3;
		old = s->nodetype[index];
		s->nodetype[index] = (old & ~mask) | data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		old = s->ipmap;
		s->ipmap = (old & ~mask) | data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 3;
		old = s->enable[index];
		s->enable[index] = (old & ~mask) | data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		/* Edge-detect: bits newly enabled that are already pending. */
		data = s->enable[index] & ~old & s->isr[index];
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 1);
			data &= ~BIT_ULL(irq);
		}
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		/* Edge-detect: bits newly disabled that are still pending. */
		data = ~s->enable[index] & old & s->isr[index];
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 0);
			data &= ~BIT_ULL(irq);
		}
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = (offset - EIOINTC_BOUNCE_START) >> 3;
		old = s->bounce[index];
		s->bounce[index] = (old & ~mask) | data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 3;
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		old = s->coreisr[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr[cpu][index] = old & ~data;
		/* Lower the parent line for each pending irq actually cleared. */
		data &= old;
		while (data) {
			irq = __ffs(data);
			eiointc_update_irq(s, irq + index * 64, 0);
			data &= ~BIT_ULL(irq);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		index = (offset - EIOINTC_COREMAP_START) >> 3;
		old = s->coremap[index];
		s->coremap[index] = (old & ~mask) | data;
		data = s->coremap[index];
		/* Re-resolve routing for the 8 irqs covered by this register. */
		eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true);
		break;
	default:
		break;
	}

	return 0;
}
291
292
static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
293
struct kvm_io_device *dev,
294
gpa_t addr, int len, const void *val)
295
{
296
unsigned long flags, value;
297
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
298
299
if (!eiointc) {
300
kvm_err("%s: eiointc irqchip not valid!\n", __func__);
301
return 0;
302
}
303
304
if (addr & (len - 1)) {
305
kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
306
return 0;
307
}
308
309
vcpu->stat.eiointc_write_exits++;
310
spin_lock_irqsave(&eiointc->lock, flags);
311
switch (len) {
312
case 1:
313
value = *(unsigned char *)val;
314
loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF);
315
break;
316
case 2:
317
value = *(unsigned short *)val;
318
loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX);
319
break;
320
case 4:
321
value = *(unsigned int *)val;
322
loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX);
323
break;
324
default:
325
value = *(unsigned long *)val;
326
loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX);
327
break;
328
}
329
spin_unlock_irqrestore(&eiointc->lock, flags);
330
331
return 0;
332
}
333
334
/* MMIO callbacks for the main EIOINTC IOCSR register range. */
static const struct kvm_io_device_ops kvm_eiointc_ops = {
	.read = kvm_eiointc_read,
	.write = kvm_eiointc_write,
};
338
339
static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu,
340
struct kvm_io_device *dev,
341
gpa_t addr, int len, void *val)
342
{
343
unsigned long flags;
344
u32 *data = val;
345
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
346
347
if (!eiointc) {
348
kvm_err("%s: eiointc irqchip not valid!\n", __func__);
349
return 0;
350
}
351
352
addr -= EIOINTC_VIRT_BASE;
353
spin_lock_irqsave(&eiointc->lock, flags);
354
switch (addr) {
355
case EIOINTC_VIRT_FEATURES:
356
*data = eiointc->features;
357
break;
358
case EIOINTC_VIRT_CONFIG:
359
*data = eiointc->status;
360
break;
361
default:
362
break;
363
}
364
spin_unlock_irqrestore(&eiointc->lock, flags);
365
366
return 0;
367
}
368
369
static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu,
370
struct kvm_io_device *dev,
371
gpa_t addr, int len, const void *val)
372
{
373
unsigned long flags;
374
u32 value = *(u32 *)val;
375
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
376
377
if (!eiointc) {
378
kvm_err("%s: eiointc irqchip not valid!\n", __func__);
379
return 0;
380
}
381
382
addr -= EIOINTC_VIRT_BASE;
383
spin_lock_irqsave(&eiointc->lock, flags);
384
switch (addr) {
385
case EIOINTC_VIRT_FEATURES:
386
break;
387
case EIOINTC_VIRT_CONFIG:
388
/*
389
* eiointc features can only be set at disabled status
390
*/
391
if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value) {
392
break;
393
}
394
eiointc->status = value & eiointc->features;
395
break;
396
default:
397
break;
398
}
399
spin_unlock_irqrestore(&eiointc->lock, flags);
400
401
return 0;
402
}
403
404
/* MMIO callbacks for the EIOINTC virt-extension IOCSR register range. */
static const struct kvm_io_device_ops kvm_eiointc_virt_ops = {
	.read = kvm_eiointc_virt_read,
	.write = kvm_eiointc_virt_write,
};
408
409
/*
 * Handle KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL device attributes: set the
 * routable vcpu count and feature word, and rebuild the software routing
 * caches once userspace signals that restore has finished loading state.
 */
static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
					struct kvm_device_attr *attr)
{
	int ret = 0;
	unsigned long flags;
	unsigned long type = (unsigned long)attr->attr;
	u32 i, start_irq, val;
	void __user *data;
	struct loongarch_eiointc *s = dev->kvm->arch.eiointc;

	data = (void __user *)attr->addr;
	/* Copy from userspace before taking the spinlock: copy_from_user may sleep. */
	switch (type) {
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
		if (copy_from_user(&val, data, 4))
			return -EFAULT;
		break;
	default:
		break;
	}

	spin_lock_irqsave(&s->lock, flags);
	switch (type) {
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
		/* Bound the routable vcpu count supplied by userspace. */
		if (val > EIOINTC_ROUTE_MAX_VCPUS)
			ret = -EINVAL;
		else
			s->num_cpu = val;
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
		s->features = val;
		/* Without the virt extension the controller is always enabled. */
		if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
			s->status |= BIT(EIOINTC_ENABLE);
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED:
		/* Registers are loaded; rebuild sw_coreisr and sw_coremap caches. */
		eiointc_set_sw_coreisr(s);
		for (i = 0; i < (EIOINTC_IRQS / 8); i++) {
			start_irq = i * 8;
			eiointc_update_sw_coremap(s, start_irq,
					s->coremap[i], sizeof(u64), false);
		}
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&s->lock, flags);

	return ret;
}
458
459
/*
 * Access one 32-bit word of emulated register state for the REGS attr
 * group. attr->attr encodes the cpu index in bits 31:16 and the register
 * address in bits 15:0; @data points at the 4-byte value to read or write.
 *
 * NOTE(review): the "+ offset * 4" arithmetic below scales by the
 * pointed-to element size, so it only advances in 4-byte steps if these
 * fields are declared as byte/u32 arrays (or the pointer is void *) in
 * asm/kvm_eiointc.h; for plain u64 arrays (as the 8-byte indexing in
 * loongarch_eiointc_read suggests) it would overshoot — verify against
 * the header.
 */
static int kvm_eiointc_regs_access(struct kvm_device *dev,
				struct kvm_device_attr *attr,
				bool is_write, int *data)
{
	int addr, cpu, offset, ret = 0;
	unsigned long flags;
	void *p = NULL;
	struct loongarch_eiointc *s;

	s = dev->kvm->arch.eiointc;
	addr = attr->attr;
	cpu = addr >> 16;
	addr &= 0xffff;
	switch (addr) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		offset = (addr - EIOINTC_NODETYPE_START) / 4;
		p = s->nodetype + offset * 4;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		offset = (addr - EIOINTC_IPMAP_START) / 4;
		p = &s->ipmap + offset * 4;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		offset = (addr - EIOINTC_ENABLE_START) / 4;
		p = s->enable + offset * 4;
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		offset = (addr - EIOINTC_BOUNCE_START) / 4;
		p = s->bounce + offset * 4;
		break;
	case EIOINTC_ISR_START ... EIOINTC_ISR_END:
		offset = (addr - EIOINTC_ISR_START) / 4;
		p = s->isr + offset * 4;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		/* coreisr is banked per cpu; reject cpus beyond the configured count. */
		if (cpu >= s->num_cpu)
			return -EINVAL;

		offset = (addr - EIOINTC_COREISR_START) / 4;
		p = s->coreisr[cpu] + offset * 4;
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		offset = (addr - EIOINTC_COREMAP_START) / 4;
		p = s->coremap + offset * 4;
		break;
	default:
		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&s->lock, flags);
	if (is_write)
		memcpy(p, data, 4);
	else
		memcpy(data, p, 4);
	spin_unlock_irqrestore(&s->lock, flags);

	return ret;
}
518
519
static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
520
struct kvm_device_attr *attr,
521
bool is_write, int *data)
522
{
523
int addr, ret = 0;
524
unsigned long flags;
525
void *p = NULL;
526
struct loongarch_eiointc *s;
527
528
s = dev->kvm->arch.eiointc;
529
addr = attr->attr;
530
addr &= 0xffff;
531
532
switch (addr) {
533
case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
534
if (is_write)
535
return ret;
536
537
p = &s->num_cpu;
538
break;
539
case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
540
if (is_write)
541
return ret;
542
543
p = &s->features;
544
break;
545
case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
546
p = &s->status;
547
break;
548
default:
549
kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
550
return -EINVAL;
551
}
552
spin_lock_irqsave(&s->lock, flags);
553
if (is_write)
554
memcpy(p, data, 4);
555
else
556
memcpy(data, p, 4);
557
spin_unlock_irqrestore(&s->lock, flags);
558
559
return ret;
560
}
561
562
static int kvm_eiointc_get_attr(struct kvm_device *dev,
563
struct kvm_device_attr *attr)
564
{
565
int ret, data;
566
567
switch (attr->group) {
568
case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
569
ret = kvm_eiointc_regs_access(dev, attr, false, &data);
570
if (ret)
571
return ret;
572
573
if (copy_to_user((void __user *)attr->addr, &data, 4))
574
ret = -EFAULT;
575
576
return ret;
577
case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
578
ret = kvm_eiointc_sw_status_access(dev, attr, false, &data);
579
if (ret)
580
return ret;
581
582
if (copy_to_user((void __user *)attr->addr, &data, 4))
583
ret = -EFAULT;
584
585
return ret;
586
default:
587
return -EINVAL;
588
}
589
}
590
591
static int kvm_eiointc_set_attr(struct kvm_device *dev,
592
struct kvm_device_attr *attr)
593
{
594
int data;
595
596
switch (attr->group) {
597
case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
598
return kvm_eiointc_ctrl_access(dev, attr);
599
case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
600
if (copy_from_user(&data, (void __user *)attr->addr, 4))
601
return -EFAULT;
602
603
return kvm_eiointc_regs_access(dev, attr, true, &data);
604
case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
605
if (copy_from_user(&data, (void __user *)attr->addr, 4))
606
return -EFAULT;
607
608
return kvm_eiointc_sw_status_access(dev, attr, true, &data);
609
default:
610
return -EINVAL;
611
}
612
}
613
614
/*
 * Create the in-kernel EIOINTC device: allocate state and register the
 * main and virt-extension register ranges on the IOCSR bus. Only one
 * eiointc may exist per VM.
 *
 * Fix: kvm_io_bus_register_dev()/kvm_io_bus_unregister_dev() must be
 * called with kvm->slots_lock held; the original held it only around the
 * first registration, leaving the second registration and the error-path
 * unregister unprotected. Hold the lock across both and unwind with goto.
 */
static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct loongarch_eiointc *s;
	struct kvm_io_device *device;
	struct kvm *kvm = dev->kvm;

	/* eiointc has been created */
	if (kvm->arch.eiointc)
		return -EINVAL;

	s = kzalloc(sizeof(struct loongarch_eiointc), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;

	/*
	 * Initialize IOCSR device
	 */
	device = &s->device;
	kvm_iodevice_init(device, &kvm_eiointc_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
			EIOINTC_BASE, EIOINTC_SIZE, device);
	if (ret < 0)
		goto out_unlock;

	device = &s->device_vext;
	kvm_iodevice_init(device, &kvm_eiointc_virt_ops);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
			EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device);
	if (ret < 0) {
		kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
		goto out_unlock;
	}
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.eiointc = s;

	return 0;

out_unlock:
	mutex_unlock(&kvm->slots_lock);
	kfree(s);
	return ret;
}
659
660
static void kvm_eiointc_destroy(struct kvm_device *dev)
661
{
662
struct kvm *kvm;
663
struct loongarch_eiointc *eiointc;
664
665
if (!dev || !dev->kvm || !dev->kvm->arch.eiointc)
666
return;
667
668
kvm = dev->kvm;
669
eiointc = kvm->arch.eiointc;
670
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
671
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
672
kfree(eiointc);
673
kfree(dev);
674
}
675
676
/* Device ops for the KVM_DEV_TYPE_LOONGARCH_EIOINTC device class. */
static struct kvm_device_ops kvm_eiointc_dev_ops = {
	.name = "kvm-loongarch-eiointc",
	.create = kvm_eiointc_create,
	.destroy = kvm_eiointc_destroy,
	.set_attr = kvm_eiointc_set_attr,
	.get_attr = kvm_eiointc_get_attr,
};
683
684
int kvm_loongarch_register_eiointc_device(void)
685
{
686
return kvm_register_device_ops(&kvm_eiointc_dev_ops, KVM_DEV_TYPE_LOONGARCH_EIOINTC);
687
}
688
689