Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/amd64/vmm/io/ppt.c
105585 views
1
/*-
2
* SPDX-License-Identifier: BSD-2-Clause
3
*
4
* Copyright (c) 2011 NetApp, Inc.
5
* All rights reserved.
6
*
7
* Redistribution and use in source and binary forms, with or without
8
* modification, are permitted provided that the following conditions
9
* are met:
10
* 1. Redistributions of source code must retain the above copyright
11
* notice, this list of conditions and the following disclaimer.
12
* 2. Redistributions in binary form must reproduce the above copyright
13
* notice, this list of conditions and the following disclaimer in the
14
* documentation and/or other materials provided with the distribution.
15
*
16
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26
* SUCH DAMAGE.
27
*/
28
29
#include <sys/param.h>
30
#include <sys/systm.h>
31
#include <sys/bus.h>
32
#include <sys/kernel.h>
33
#include <sys/lock.h>
34
#include <sys/malloc.h>
35
#include <sys/module.h>
36
#include <sys/pciio.h>
37
#include <sys/rman.h>
38
#include <sys/smp.h>
39
#include <sys/sx.h>
40
#include <sys/sysctl.h>
41
42
#include <dev/pci/pcivar.h>
43
#include <dev/pci/pcireg.h>
44
45
#include <machine/resource.h>
46
#include <machine/vmm.h>
47
#include <machine/vmm_dev.h>
48
49
#include <dev/vmm/vmm_ktr.h>
50
51
#include "vmm_lapic.h"
52
53
#include "iommu.h"
54
#include "ppt.h"
55
56
/* XXX locking */

/* Maximum number of MSI messages a passthrough device may use. */
#define MAX_MSIMSGS 32

/*
 * If the MSI-X table is located in the middle of a BAR then that MMIO
 * region gets split into two segments - one segment above the MSI-X table
 * and the other segment below the MSI-X table - with a hole in place of
 * the MSI-X table so accesses to it can be trapped and emulated.
 *
 * So, allocate a MMIO segment for each BAR register + 1 additional segment.
 */
#define MAX_MMIOSEGS ((PCIR_MAX_BAR_0 + 1) + 1)

/* malloc(9) type for the dynamically sized per-device MSI-X arrays. */
MALLOC_DEFINE(M_PPTMSIX, "pptmsix", "Passthru MSI-X resources");

/*
 * Global sx lock serializing access to pptdev_list, num_pptdevs and the
 * per-device ownership and interrupt state.
 */
static struct sx ppt_mtx;
SX_SYSINIT(ppt_mtx, &ppt_mtx, "ppt_mtx");

#define PPT_LOCK() sx_xlock(&ppt_mtx)
#define PPT_UNLOCK() sx_xunlock(&ppt_mtx)
#define PPT_ASSERT_LOCKED() sx_assert(&ppt_mtx, SA_XLOCKED)
77
78
/*
 * Argument passed to pptintr() for each allocated MSI/MSI-X vector:
 * the owning device plus the guest-programmed message address/data.
 */
struct pptintr_arg { /* pptintr(pptintr_arg) */
	struct pptdev *pptdev;
	uint64_t addr;		/* guest MSI address */
	uint64_t msg_data;	/* guest MSI data payload */
};
83
84
/* One guest-physical MMIO mapping of (part of) a device BAR. */
struct pptseg {
	vm_paddr_t gpa;		/* guest physical base address */
	size_t len;		/* length in bytes; 0 means slot is free */
	int wired;
};
89
90
/* Per passthrough device soft state. */
struct pptdev {
	device_t dev;
	struct vm *vm;			/* owner of this device */
	TAILQ_ENTRY(pptdev) next;	/* link on pptdev_list */
	struct pptseg mmio[MAX_MMIOSEGS];
	struct {
		int num_msgs;		/* guest state */

		int startrid;		/* host state; 0 => legacy INTx */
		struct resource *res[MAX_MSIMSGS];
		void *cookie[MAX_MSIMSGS];
		struct pptintr_arg arg[MAX_MSIMSGS];
	} msi;

	struct {
		int num_msgs;
		int startrid;
		int msix_table_rid;
		int msix_pba_rid;
		struct resource *msix_table_res;
		struct resource *msix_pba_res;
		/* Sized to num_msgs; allocated from M_PPTMSIX. */
		struct resource **res;
		void **cookie;
		struct pptintr_arg *arg;
	} msix;
};
116
117
SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, ppt, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "bhyve passthru devices");

/* Count of attached passthrough devices; exported read-only via sysctl. */
static int num_pptdevs;
SYSCTL_INT(_hw_vmm_ppt, OID_AUTO, devices, CTLFLAG_RD, &num_pptdevs, 0,
    "number of pci passthru devices");

/* All attached passthrough devices; protected by ppt_mtx. */
static TAILQ_HEAD(, pptdev) pptdev_list = TAILQ_HEAD_INITIALIZER(pptdev_list);
127
static int
128
ppt_probe(device_t dev)
129
{
130
int bus, slot, func;
131
struct pci_devinfo *dinfo;
132
133
dinfo = (struct pci_devinfo *)device_get_ivars(dev);
134
135
bus = pci_get_bus(dev);
136
slot = pci_get_slot(dev);
137
func = pci_get_function(dev);
138
139
/*
140
* To qualify as a pci passthrough device a device must:
141
* - be allowed by administrator to be used in this role
142
* - be an endpoint device
143
*/
144
if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
145
return (ENXIO);
146
else if (vmm_is_pptdev(bus, slot, func))
147
return (0);
148
else
149
/*
150
* Returning BUS_PROBE_NOWILDCARD here matches devices that the
151
* SR-IOV infrastructure specified as "ppt" passthrough devices.
152
* All normal devices that did not have "ppt" specified as their
153
* driver will not be matched by this.
154
*/
155
return (BUS_PROBE_NOWILDCARD);
156
}
157
158
/*
 * Attach routine: quiesce the device and move it out of the host IOMMU
 * domain so it can later be assigned to a guest.
 */
static int
ppt_attach(device_t dev)
{
	struct pptdev *ppt;
	uint16_t cmd, cmd1;
	int error;

	ppt = device_get_softc(dev);

	PPT_LOCK();
	/*
	 * Disable I/O, memory decoding and busmastering before detaching
	 * the device from the host IOMMU domain.  Save the original
	 * command register value so it can be restored on failure.
	 */
	cmd1 = cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd &= ~(PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
	error = iommu_remove_device(iommu_host_domain(), dev, pci_get_rid(dev));
	if (error != 0) {
		/* Roll back the command register change. */
		pci_write_config(dev, PCIR_COMMAND, cmd1, 2);
		PPT_UNLOCK();
		return (error);
	}
	/* Publish the device on the global passthrough list. */
	num_pptdevs++;
	TAILQ_INSERT_TAIL(&pptdev_list, ppt, next);
	ppt->dev = dev;
	PPT_UNLOCK();

	if (bootverbose)
		device_printf(dev, "attached\n");

	return (0);
}
187
188
/*
 * Detach routine: return an unowned passthrough device to the host.
 * Fails with EBUSY if a VM still owns the device.
 */
static int
ppt_detach(device_t dev)
{
	struct pptdev *ppt;
	int error;

	error = 0;
	ppt = device_get_softc(dev);

	PPT_LOCK();
	/* Cannot detach while assigned to a guest. */
	if (ppt->vm != NULL) {
		error = EBUSY;
		goto out;
	}
	/* Re-add the device to the host IOMMU domain, if one exists. */
	if (iommu_host_domain() != NULL) {
		error = iommu_add_device(iommu_host_domain(), dev,
		    pci_get_rid(dev));
		if (error != 0)
			goto out;
	}
	num_pptdevs--;
	TAILQ_REMOVE(&pptdev_list, ppt, next);
out:
	PPT_UNLOCK();

	return (error);
}
215
216
/* newbus glue: method table and driver registration for the "ppt" driver. */
static device_method_t ppt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ppt_probe),
	DEVMETHOD(device_attach, ppt_attach),
	DEVMETHOD(device_detach, ppt_detach),
	{0, 0}
};

DEFINE_CLASS_0(ppt, ppt_driver, ppt_methods, sizeof(struct pptdev));
DRIVER_MODULE(ppt, pci, ppt_driver, NULL, NULL);
226
227
static int
228
ppt_find(struct vm *vm, int bus, int slot, int func, struct pptdev **pptp)
229
{
230
device_t dev;
231
struct pptdev *ppt;
232
int b, s, f;
233
234
PPT_ASSERT_LOCKED();
235
236
TAILQ_FOREACH(ppt, &pptdev_list, next) {
237
dev = ppt->dev;
238
b = pci_get_bus(dev);
239
s = pci_get_slot(dev);
240
f = pci_get_function(dev);
241
if (bus == b && slot == s && func == f)
242
break;
243
}
244
245
if (ppt == NULL)
246
return (ENOENT);
247
if (ppt->vm != vm) /* Make sure we own this device */
248
return (EBUSY);
249
*pptp = ppt;
250
return (0);
251
}
252
253
static void
254
ppt_unmap_all_mmio(struct vm *vm, struct pptdev *ppt)
255
{
256
int i;
257
struct pptseg *seg;
258
259
for (i = 0; i < MAX_MMIOSEGS; i++) {
260
seg = &ppt->mmio[i];
261
if (seg->len == 0)
262
continue;
263
(void)vm_unmap_mmio(vm, seg->gpa, seg->len);
264
bzero(seg, sizeof(struct pptseg));
265
}
266
}
267
268
/*
 * Release all MSI (or shared legacy INTx) interrupt state for 'ppt':
 * tear down handlers, release IRQ resources, and release the MSI
 * allocation if one was made.
 */
static void
ppt_teardown_msi(struct pptdev *ppt)
{
	int i, rid;
	void *cookie;
	struct resource *res;

	if (ppt->msi.num_msgs == 0)
		return;

	for (i = 0; i < ppt->msi.num_msgs; i++) {
		rid = ppt->msi.startrid + i;
		res = ppt->msi.res[i];
		cookie = ppt->msi.cookie[i];

		/* Handler must be torn down before the resource is freed. */
		if (cookie != NULL)
			bus_teardown_intr(ppt->dev, res, cookie);

		if (res != NULL)
			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);

		ppt->msi.res[i] = NULL;
		ppt->msi.cookie[i] = NULL;
	}

	/* startrid == 1 means MSI was allocated (0 is legacy INTx). */
	if (ppt->msi.startrid == 1)
		pci_release_msi(ppt->dev);

	ppt->msi.num_msgs = 0;
}
298
299
/*
 * Release the interrupt handler and IRQ resource for a single MSI-X
 * vector 'idx'.  Safe to call on a vector that was never set up.
 */
static void
ppt_teardown_msix_intr(struct pptdev *ppt, int idx)
{
	int rid;
	struct resource *res;
	void *cookie;

	rid = ppt->msix.startrid + idx;
	res = ppt->msix.res[idx];
	cookie = ppt->msix.cookie[idx];

	/* Handler must be torn down before the resource is freed. */
	if (cookie != NULL)
		bus_teardown_intr(ppt->dev, res, cookie);

	if (res != NULL)
		bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);

	ppt->msix.res[idx] = NULL;
	ppt->msix.cookie[idx] = NULL;
}
319
320
/*
 * Release all MSI-X state for 'ppt': per-vector interrupts, the dynamic
 * arrays, the MSI-X allocation, and the table/PBA BAR resources.
 */
static void
ppt_teardown_msix(struct pptdev *ppt)
{
	int i;

	if (ppt->msix.num_msgs == 0)
		return;

	for (i = 0; i < ppt->msix.num_msgs; i++)
		ppt_teardown_msix_intr(ppt, i);

	free(ppt->msix.res, M_PPTMSIX);
	free(ppt->msix.cookie, M_PPTMSIX);
	free(ppt->msix.arg, M_PPTMSIX);

	pci_release_msi(ppt->dev);

	/* Release the BAR holding the MSI-X table, if it was allocated. */
	if (ppt->msix.msix_table_res) {
		bus_release_resource(ppt->dev, SYS_RES_MEMORY,
		    ppt->msix.msix_table_rid,
		    ppt->msix.msix_table_res);
		ppt->msix.msix_table_res = NULL;
		ppt->msix.msix_table_rid = 0;
	}
	/* The PBA may live in a separate BAR; release that one too. */
	if (ppt->msix.msix_pba_res) {
		bus_release_resource(ppt->dev, SYS_RES_MEMORY,
		    ppt->msix.msix_pba_rid,
		    ppt->msix.msix_pba_res);
		ppt->msix.msix_pba_res = NULL;
		ppt->msix.msix_pba_rid = 0;
	}

	ppt->msix.num_msgs = 0;
}
354
355
int
356
ppt_assigned_devices(struct vm *vm)
357
{
358
struct pptdev *ppt;
359
int num;
360
361
num = 0;
362
TAILQ_FOREACH(ppt, &pptdev_list, next) {
363
if (ppt->vm == vm)
364
num++;
365
}
366
return (num);
367
}
368
369
bool
370
ppt_is_mmio(struct vm *vm, vm_paddr_t gpa)
371
{
372
int i;
373
struct pptdev *ppt;
374
struct pptseg *seg;
375
376
TAILQ_FOREACH(ppt, &pptdev_list, next) {
377
if (ppt->vm != vm)
378
continue;
379
380
for (i = 0; i < MAX_MMIOSEGS; i++) {
381
seg = &ppt->mmio[i];
382
if (seg->len == 0)
383
continue;
384
if (gpa >= seg->gpa && gpa < seg->gpa + seg->len)
385
return (true);
386
}
387
}
388
389
return (false);
390
}
391
392
/*
 * Reset the device: try PCIe Function Level Reset first (with at least a
 * 10ms completion timeout), falling back to a power reset if FLR is not
 * supported or fails.
 */
static void
ppt_pci_reset(device_t dev)
{

	if (pcie_flr(dev,
	    max(pcie_get_max_completion_timeout(dev) / 1000, 10), true))
		return;

	pci_power_reset(dev);
}
402
403
static uint16_t
404
ppt_bar_enables(struct pptdev *ppt)
405
{
406
struct pci_map *pm;
407
uint16_t cmd;
408
409
cmd = 0;
410
for (pm = pci_first_bar(ppt->dev); pm != NULL; pm = pci_next_bar(pm)) {
411
if (PCI_BAR_IO(pm->pm_value))
412
cmd |= PCIM_CMD_PORTEN;
413
if (PCI_BAR_MEM(pm->pm_value))
414
cmd |= PCIM_CMD_MEMEN;
415
}
416
return (cmd);
417
}
418
419
/*
 * Assign the (currently unowned) passthrough device at bus/slot/func to
 * 'vm': reset it, add it to the VM's IOMMU domain and re-enable decoding
 * and busmastering.
 */
int
ppt_assign_device(struct vm *vm, int bus, int slot, int func)
{
	struct pptdev *ppt;
	int error;
	uint16_t cmd;

	PPT_LOCK();
	/* Passing NULL requires the device to be unowned. */
	error = ppt_find(NULL, bus, slot, func, &ppt);
	if (error != 0)
		goto out;

	/* Reset to a clean state, preserving config space across the reset. */
	pci_save_state(ppt->dev);
	ppt_pci_reset(ppt->dev);
	pci_restore_state(ppt->dev);
	error = iommu_add_device(vm_iommu_domain(vm), ppt->dev,
	    pci_get_rid(ppt->dev));
	if (error != 0)
		goto out;
	ppt->vm = vm;
	/* Re-enable busmastering plus whatever decode bits the BARs need. */
	cmd = pci_read_config(ppt->dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_BUSMASTEREN | ppt_bar_enables(ppt);
	pci_write_config(ppt->dev, PCIR_COMMAND, cmd, 2);
out:
	PPT_UNLOCK();
	return (error);
}
447
448
/*
 * Unassign the passthrough device at bus/slot/func from 'vm': quiesce and
 * reset it, tear down all guest mappings and interrupts, and remove it
 * from the VM's IOMMU domain.
 */
int
ppt_unassign_device(struct vm *vm, int bus, int slot, int func)
{
	struct pptdev *ppt;
	int error;
	uint16_t cmd;

	PPT_LOCK();
	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error != 0)
		goto out;

	/* Disable decoding and busmastering before resetting the device. */
	cmd = pci_read_config(ppt->dev, PCIR_COMMAND, 2);
	cmd &= ~(PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
	pci_write_config(ppt->dev, PCIR_COMMAND, cmd, 2);
	pci_save_state(ppt->dev);
	ppt_pci_reset(ppt->dev);
	pci_restore_state(ppt->dev);
	ppt_unmap_all_mmio(vm, ppt);
	ppt_teardown_msi(ppt);
	ppt_teardown_msix(ppt);
	error = iommu_remove_device(vm_iommu_domain(vm), ppt->dev,
	    pci_get_rid(ppt->dev));
	/* Ownership is dropped even if the IOMMU removal failed. */
	ppt->vm = NULL;
out:
	PPT_UNLOCK();
	return (error);
}
476
477
int
478
ppt_unassign_all(struct vm *vm)
479
{
480
struct pptdev *ppt;
481
int bus, slot, func;
482
device_t dev;
483
484
TAILQ_FOREACH(ppt, &pptdev_list, next) {
485
if (ppt->vm == vm) {
486
dev = ppt->dev;
487
bus = pci_get_bus(dev);
488
slot = pci_get_slot(dev);
489
func = pci_get_function(dev);
490
vm_unassign_pptdev(vm, bus, slot, func);
491
}
492
}
493
494
return (0);
495
}
496
497
static bool
498
ppt_valid_bar_mapping(struct pptdev *ppt, vm_paddr_t hpa, size_t len)
499
{
500
struct pci_map *pm;
501
pci_addr_t base, size;
502
503
for (pm = pci_first_bar(ppt->dev); pm != NULL; pm = pci_next_bar(pm)) {
504
if (!PCI_BAR_MEM(pm->pm_value))
505
continue;
506
base = pm->pm_value & PCIM_BAR_MEM_BASE;
507
size = (pci_addr_t)1 << pm->pm_size;
508
if (hpa >= base && hpa + len <= base + size)
509
return (true);
510
}
511
return (false);
512
}
513
514
/*
 * Map host physical range [hpa, hpa + len) of a passthrough device BAR at
 * guest physical address 'gpa'.  All addresses and the length must be
 * page-aligned and the host range must lie within a memory BAR.
 * Returns EINVAL on bad arguments, ENOSPC if no MMIO segment slot is
 * free, or the vm_map_mmio() error.
 */
int
ppt_map_mmio(struct vm *vm, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	int i, error;
	struct pptseg *seg;
	struct pptdev *ppt;

	/* Reject unaligned, zero-length or address-wrapping ranges. */
	if (len % PAGE_SIZE != 0 || len == 0 || gpa % PAGE_SIZE != 0 ||
	    hpa % PAGE_SIZE != 0 || gpa + len < gpa || hpa + len < hpa)
		return (EINVAL);

	PPT_LOCK();
	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		goto out;

	/* Only allow mapping memory that belongs to one of the BARs. */
	if (!ppt_valid_bar_mapping(ppt, hpa, len)) {
		error = EINVAL;
		goto out;
	}

	/* Record the mapping in the first free segment slot (len == 0). */
	error = ENOSPC;
	for (i = 0; i < MAX_MMIOSEGS; i++) {
		seg = &ppt->mmio[i];
		if (seg->len == 0) {
			error = vm_map_mmio(vm, gpa, len, hpa);
			if (error == 0) {
				seg->gpa = gpa;
				seg->len = len;
			}
			break;
		}
	}
out:
	PPT_UNLOCK();
	return (error);
}
552
553
int
554
ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func,
555
vm_paddr_t gpa, size_t len)
556
{
557
int i, error;
558
struct pptseg *seg;
559
struct pptdev *ppt;
560
561
PPT_LOCK();
562
error = ppt_find(vm, bus, slot, func, &ppt);
563
if (error)
564
goto out;
565
566
error = ENOENT;
567
for (i = 0; i < MAX_MMIOSEGS; i++) {
568
seg = &ppt->mmio[i];
569
if (seg->gpa == gpa && seg->len == len) {
570
error = vm_unmap_mmio(vm, seg->gpa, seg->len);
571
if (error == 0) {
572
seg->gpa = 0;
573
seg->len = 0;
574
}
575
break;
576
}
577
}
578
out:
579
PPT_UNLOCK();
580
return (ENOENT);
581
}
582
583
/*
 * Interrupt filter for passthrough MSI/MSI-X (and shared legacy)
 * interrupts: forward the guest-programmed MSI message to the VM's
 * local APIC emulation.
 */
static int
pptintr(void *arg)
{
	struct pptdev *ppt;
	struct pptintr_arg *pptarg;

	pptarg = arg;
	ppt = pptarg->pptdev;

	if (ppt->vm != NULL)
		lapic_intr_msi(ppt->vm, pptarg->addr, pptarg->msg_data);
	else {
		/*
		 * XXX
		 * This is not expected to happen - panic?
		 */
	}

	/*
	 * For legacy interrupts give other filters a chance in case
	 * the interrupt was not generated by the passthrough device.
	 */
	if (ppt->msi.startrid == 0)
		return (FILTER_STRAY);
	else
		return (FILTER_HANDLED);
}
610
611
/*
 * Configure 'numvec' MSI vectors (or a shared legacy interrupt if the
 * device has no MSI capability) for the passthrough device at
 * bus/slot/func, delivering guest message (addr, msg .. msg+numvec-1).
 * numvec == 0 tears down any existing MSI state.  Any previously
 * configured MSI state is always released first.
 */
int
ppt_setup_msi(struct vm *vm, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	int i, rid, flags;
	int msi_count, startrid, error, tmp;
	struct pptdev *ppt;

	if (numvec < 0 || numvec > MAX_MSIMSGS)
		return (EINVAL);

	PPT_LOCK();
	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		goto out;

	/* Reject attempts to enable MSI while MSI-X is active. */
	if (ppt->msix.num_msgs != 0 && numvec != 0) {
		error = EBUSY;
		goto out;
	}

	/* Free any allocated resources */
	ppt_teardown_msi(ppt);

	if (numvec == 0)		/* nothing more to do */
		goto out;

	flags = RF_ACTIVE;
	msi_count = pci_msi_count(ppt->dev);
	if (msi_count == 0) {
		startrid = 0;		/* legacy interrupt */
		msi_count = 1;
		flags |= RF_SHAREABLE;
	} else
		startrid = 1;		/* MSI */

	/*
	 * The device must be capable of supporting the number of vectors
	 * the guest wants to allocate.
	 */
	if (numvec > msi_count) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Make sure that we can allocate all the MSI vectors that are needed
	 * by the guest.
	 */
	if (startrid == 1) {
		tmp = numvec;
		error = pci_alloc_msi(ppt->dev, &tmp);
		if (error)
			goto out;
		else if (tmp != numvec) {
			/* Partial allocation is not acceptable. */
			pci_release_msi(ppt->dev);
			error = ENOSPC;
			goto out;
		} else {
			/* success */
		}
	}

	ppt->msi.startrid = startrid;

	/*
	 * Allocate the irq resource and attach it to the interrupt handler.
	 */
	for (i = 0; i < numvec; i++) {
		/*
		 * Bump num_msgs before each allocation so a partial
		 * failure is cleaned up completely by ppt_teardown_msi().
		 */
		ppt->msi.num_msgs = i + 1;
		ppt->msi.cookie[i] = NULL;

		rid = startrid + i;
		ppt->msi.res[i] = bus_alloc_resource_any(ppt->dev, SYS_RES_IRQ,
		    &rid, flags);
		if (ppt->msi.res[i] == NULL)
			break;

		ppt->msi.arg[i].pptdev = ppt;
		ppt->msi.arg[i].addr = addr;
		/* Each vector delivers consecutive message data values. */
		ppt->msi.arg[i].msg_data = msg + i;

		error = bus_setup_intr(ppt->dev, ppt->msi.res[i],
		    INTR_TYPE_NET | INTR_MPSAFE,
		    pptintr, NULL, &ppt->msi.arg[i],
		    &ppt->msi.cookie[i]);
		if (error != 0)
			break;
	}

	/* Loop exited early: undo everything allocated so far. */
	if (i < numvec) {
		ppt_teardown_msi(ppt);
		error = ENXIO;
	}

out:
	PPT_UNLOCK();
	return (error);
}
711
712
/*
 * Configure a single MSI-X vector 'idx' for the passthrough device at
 * bus/slot/func.  On first use this allocates all MSI-X bookkeeping:
 * the per-vector arrays, the table/PBA BAR resources and the MSI-X
 * vectors themselves.  If the vector is masked via 'vector_control',
 * any existing handler for it is torn down instead.
 */
int
ppt_setup_msix(struct vm *vm, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct pptdev *ppt;
	struct pci_devinfo *dinfo;
	int numvec, alloced, rid, error;
	size_t res_size, cookie_size, arg_size;

	PPT_LOCK();
	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error)
		goto out;

	/* Reject attempts to enable MSI-X while MSI is active. */
	if (ppt->msi.num_msgs != 0) {
		error = EBUSY;
		goto out;
	}

	dinfo = device_get_ivars(ppt->dev);
	if (dinfo == NULL) {
		error = ENXIO;
		goto out;
	}

	/*
	 * First-time configuration:
	 * 	Allocate the MSI-X table
	 *	Allocate the IRQ resources
	 *	Set up some variables in ppt->msix
	 */
	if (ppt->msix.num_msgs == 0) {
		numvec = pci_msix_count(ppt->dev);
		if (numvec <= 0) {
			error = EINVAL;
			goto out;
		}

		ppt->msix.startrid = 1;
		ppt->msix.num_msgs = numvec;

		res_size = numvec * sizeof(ppt->msix.res[0]);
		cookie_size = numvec * sizeof(ppt->msix.cookie[0]);
		arg_size = numvec * sizeof(ppt->msix.arg[0]);

		ppt->msix.res = malloc(res_size, M_PPTMSIX, M_WAITOK | M_ZERO);
		ppt->msix.cookie = malloc(cookie_size, M_PPTMSIX,
		    M_WAITOK | M_ZERO);
		ppt->msix.arg = malloc(arg_size, M_PPTMSIX, M_WAITOK | M_ZERO);

		/* Claim the BAR containing the MSI-X table. */
		rid = dinfo->cfg.msix.msix_table_bar;
		ppt->msix.msix_table_res = bus_alloc_resource_any(ppt->dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);

		if (ppt->msix.msix_table_res == NULL) {
			ppt_teardown_msix(ppt);
			error = ENOSPC;
			goto out;
		}
		ppt->msix.msix_table_rid = rid;

		/* The PBA may live in a different BAR than the table. */
		if (dinfo->cfg.msix.msix_table_bar !=
		    dinfo->cfg.msix.msix_pba_bar) {
			rid = dinfo->cfg.msix.msix_pba_bar;
			ppt->msix.msix_pba_res = bus_alloc_resource_any(
			    ppt->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);

			if (ppt->msix.msix_pba_res == NULL) {
				ppt_teardown_msix(ppt);
				error = ENOSPC;
				goto out;
			}
			ppt->msix.msix_pba_rid = rid;
		}

		/* All vectors must be granted; partial allocation fails. */
		alloced = numvec;
		error = pci_alloc_msix(ppt->dev, &alloced);
		if (error || alloced != numvec) {
			ppt_teardown_msix(ppt);
			if (error == 0)
				error = ENOSPC;
			goto out;
		}
	}

	if (idx >= ppt->msix.num_msgs) {
		error = EINVAL;
		goto out;
	}

	if ((vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
		/* Tear down the IRQ if it's already set up */
		ppt_teardown_msix_intr(ppt, idx);

		/* Allocate the IRQ resource */
		ppt->msix.cookie[idx] = NULL;
		rid = ppt->msix.startrid + idx;
		ppt->msix.res[idx] = bus_alloc_resource_any(ppt->dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (ppt->msix.res[idx] == NULL) {
			error = ENXIO;
			goto out;
		}

		ppt->msix.arg[idx].pptdev = ppt;
		ppt->msix.arg[idx].addr = addr;
		ppt->msix.arg[idx].msg_data = msg;

		/* Setup the MSI-X interrupt */
		error = bus_setup_intr(ppt->dev, ppt->msix.res[idx],
		    INTR_TYPE_NET | INTR_MPSAFE,
		    pptintr, NULL, &ppt->msix.arg[idx],
		    &ppt->msix.cookie[idx]);
		if (error != 0) {
			/* Release the IRQ resource allocated above. */
			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, ppt->msix.res[idx]);
			ppt->msix.cookie[idx] = NULL;
			ppt->msix.res[idx] = NULL;
			error = ENXIO;
			goto out;
		}
	} else {
		/* Masked, tear it down if it's already been set up */
		ppt_teardown_msix_intr(ppt, idx);
	}
out:
	PPT_UNLOCK();
	return (error);
}
841
842
/*
 * Tear down all MSI-X state for the passthrough device at bus/slot/func
 * owned by 'vm'.  Returns the ppt_find() error if the device cannot be
 * located or is not owned by 'vm'.
 */
int
ppt_disable_msix(struct vm *vm, int bus, int slot, int func)
{
	struct pptdev *ppt;
	int error;

	PPT_LOCK();
	error = ppt_find(vm, bus, slot, func, &ppt);
	if (error != 0) {
		PPT_UNLOCK();
		return (error);
	}
	ppt_teardown_msix(ppt);
	PPT_UNLOCK();
	return (0);
}
858
859