GitHub Repository: awilliam/linux-vfio
Path: blob/master/virt/kvm/assigned-dev.c

/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include "irq.h"
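
/*
 * Look up an assigned device by its userspace-supplied device id on the
 * per-VM list of assigned devices; returns NULL if no entry matches.
 */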
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}
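
/*
 * Map a host IRQ number back to the index of the MSI-X entry it was
 * registered for; returns -1 if no entry matches.
 */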
static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0)
		printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");

	return index;
}
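
/*
 * Threaded handler for host interrupts of an assigned device: masks a
 * level-triggered INTx line until the guest acks it, then forwards the
 * interrupt into the guest as the configured GSI or MSI-X vector.
 */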
static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
	u32 vector;
	int index;

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
		spin_lock(&assigned_dev->intx_lock);
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
		spin_unlock(&assigned_dev->intx_lock);
	}

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		index = find_index_from_host_irq(assigned_dev, irq);
		if (index >= 0) {
			vector = assigned_dev->guest_msix_entries[index].vector;
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id, vector, 1);
		}
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);

	return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared so this ack may be
	 * from another device.
	 */
	spin_lock(&dev->intx_lock);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock(&dev->intx_lock);
}
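
/*
 * Tear down the guest-facing half of an IRQ assignment: drop the ack
 * notifier, lower the guest interrupt line and release the IRQ source id.
 */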
static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
	assigned_dev->ack_notifier.gsi = -1;

	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
		    assigned_dev->guest_irq, 0);

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * We disable the irq here to prevent further events.
	 *
	 * Note that this may result in a nested disable if the interrupt type
	 * is INTx, but that is fine since we are going to free it.
	 *
	 * If this function is called as part of VM destruction, ensure that
	 * the kvm state is still valid at this point, since we may have to
	 * wait on a currently running IRQ handler.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq(assigned_dev->host_msix_entries[i].vector);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 (void *)assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq(assigned_dev->host_irq);

		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}
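
/*
 * Deassign host and/or guest IRQ resources according to the type mask
 * requested by userspace.
 */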
static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}
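
/*
 * Release everything attached to an assigned device: its IRQs, the saved
 * PCI state, the PCI regions and the device reference itself.
 */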
static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);
	if (pci_load_and_free_saved_state(assigned_dev->dev,
					  &assigned_dev->pci_saved_state))
		printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
		       __func__, dev_name(&assigned_dev->dev->dev));
	else
		pci_restore_state(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}
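
/* Called on VM teardown to release all devices still assigned to the VM. */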
void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}
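
/*
 * Request the device's INTx line as a non-shared, threaded interrupt;
 * IRQF_ONESHOT keeps the line masked until the threaded handler has run.
 */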
static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
				 IRQF_ONESHOT, dev->irq_name, (void *)dev))
		return -EIO;
	return 0;
}
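
/* Enable MSI on the device (if not already enabled) and request its vector. */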
#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
				 0, dev->irq_name, (void *)dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif
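
/*
 * Enable MSI-X on the device and request one threaded handler per vector;
 * on failure, unwind any vectors already requested.
 */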
#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_threaded_irq(dev->host_msix_entries[i].vector,
					 NULL, kvm_assigned_dev_thread,
					 0, dev->irq_name, (void *)dev);
		if (r)
			goto err;
	}

	return 0;
err:
	for (i -= 1; i >= 0; i--)
		free_irq(dev->host_msix_entries[i].vector, (void *)dev);
	pci_disable_msix(dev->dev);
	return r;
}
#endif
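
/*
 * The guest-side enable helpers below only record how interrupts should be
 * delivered into the guest; no host resources are touched here.
 */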
static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif
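
/*
 * Wire up the host side of an IRQ assignment for exactly one of the
 * INTx/MSI/MSI-X types and record the type on success.
 */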
static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
		 pci_name(dev->dev));

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}
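
/*
 * Wire up the guest side of an IRQ assignment: allocate an IRQ source id,
 * record the guest GSI and register the ack notifier on success.
 */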
static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}
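
/*
 * Handler for KVM_ASSIGN_DEV_IRQ: validates that at most one host type and
 * one guest type are requested, then performs the assignment under kvm->lock.
 */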
/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}
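
/* Handler for KVM_DEASSIGN_DEV_IRQ: releases the requested IRQ types. */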
static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
	mutex_unlock(&kvm->lock);
	return r;
}
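
/*
 * Handler for KVM_ASSIGN_PCI_DEVICE: claims the host PCI device, saves its
 * config state, adds it to the VM's list and optionally attaches it to the
 * IOMMU domain.
 */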
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0, idx;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	mutex_lock(&kvm->lock);
	idx = srcu_read_lock(&kvm->srcu);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
					  assigned_dev->busnr,
					  assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);
	pci_save_state(dev);
	match->pci_saved_state = pci_store_saved_state(dev);
	if (!match->pci_saved_state)
		printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
		       __func__, dev_name(&dev->dev));
	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_segnr = assigned_dev->segnr;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->intx_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
out_list_del:
	if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
		printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
		       __func__, dev_name(&dev->dev));
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
}
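
/*
 * Handler for KVM_DEASSIGN_PCI_DEVICE: detaches the device from the IOMMU
 * domain if needed and releases all resources taken at assignment time.
 */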
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
		struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		       "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}
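
/*
 * Handler for KVM_ASSIGN_SET_MSIX_NR: sizes the host and guest MSI-X entry
 * arrays; the count may only be set once per device.
 */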
#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						  entry_nr->entry_nr,
						  GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries =
			kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
				GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Not allowed to set the MSI-X entry count twice */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}
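
/*
 * Handler for KVM_ASSIGN_SET_MSIX_ENTRY: maps one MSI-X table entry to the
 * guest GSI it should raise, filling the first free slot.
 */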
static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif
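
/*
 * Top-level dispatcher for the device assignment ioctls issued on a VM fd;
 * copies the argument structs in from userspace and routes to the handlers
 * above.
 */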
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
	out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
	default:
		r = -ENOTTY;
		break;
	}
out:
	return r;
}
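
/*
 * Example: a minimal, hypothetical userspace sketch of how the ioctls
 * handled above fit together.  The structs and KVM_* constants come from
 * <linux/kvm.h>; the vm_fd, the device id, the PCI location (bus 1,
 * devfn 0) and the guest GSI are assumed values chosen for illustration,
 * and error handling is omitted:
 *
 *	struct kvm_assigned_pci_dev dev = {
 *		.assigned_dev_id = 0x0100,
 *		.busnr           = 0x01,
 *		.devfn           = 0x00,
 *		.flags           = KVM_DEV_ASSIGN_ENABLE_IOMMU,
 *	};
 *	struct kvm_assigned_irq irq = {
 *		.assigned_dev_id = 0x0100,
 *		.guest_irq       = 10,
 *		.flags           = KVM_DEV_IRQ_HOST_INTX | KVM_DEV_IRQ_GUEST_INTX,
 *	};
 *
 *	ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev);
 *	ioctl(vm_fd, KVM_ASSIGN_DEV_IRQ, &irq);
 *	...
 *	ioctl(vm_fd, KVM_DEASSIGN_DEV_IRQ, &irq);
 *	ioctl(vm_fd, KVM_DEASSIGN_PCI_DEVICE, &dev);
 */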