Path: blob/master/arch/powerpc/platforms/powernv/pci-sriov.c
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/bitmap.h>
#include <linux/pci.h>

#include <asm/opal.h>

#include "pci.h"

/*
 * The majority of the complexity in supporting SR-IOV on PowerNV comes from
 * the need to put the MMIO space for each VF into a separate PE. Internally
 * the PHB maps MMIO addresses to a specific PE using the "Memory BAR Table".
 * The MBT historically only applied to the 64bit MMIO window of the PHB
 * so it's common to see it referred to as the "M64BT".
 *
 * An MBT entry stores the mapped range as an <base>,<mask> pair. This forces
 * the address range that we want to map to be power-of-two sized and aligned.
 * For conventional PCI devices this isn't really an issue since PCI device BARs
 * have the same requirement.
 *
 * For a SR-IOV BAR things are a little more awkward since size and alignment
 * are not coupled. The alignment is set based on the per-VF BAR size, but
 * the total BAR area is: number-of-vfs * per-vf-size. The number of VFs
 * isn't necessarily a power of two, so neither is the total size. To fix that
 * we need to finesse (read: hack) the Linux BAR allocator so that it will
 * allocate the SR-IOV BARs in a way that lets us map them using the MBT.
 *
 * The changes to size and alignment that we need to do depend on the "mode"
 * of MBT entry that we use. We only support SR-IOV on PHB3 (IODA2) and above,
 * so as a baseline we can assume that we have the following BAR modes
 * available:
 *
 * NB: $PE_COUNT is the number of PEs that the PHB supports.
 *
 * a) A segmented BAR that splits the mapped range into $PE_COUNT equally sized
 *    segments. The n'th segment is mapped to the n'th PE.
 * b) An un-segmented BAR that maps the whole address range to a specific PE.
 *
 * We prefer to use mode a) since it only requires one MBT entry per SR-IOV
 * BAR. For comparison, b) requires one entry per-VF per-BAR, or
 * (num-vfs * num-sriov-bars) entries in total. To use a) we need the size of
 * each segment to equal the size of the per-VF BAR area. So:
 *
 *    new_size = per-vf-size * number-of-PEs
 *
 * The alignment for the SR-IOV BAR also needs to be changed from per-vf-size
 * to "new_size", calculated above. Implementing this is a convoluted process
 * which requires several hooks in the PCI core:
 *
 * 1. In pcibios_device_add() we call pnv_pci_ioda_fixup_iov().
 *
 *    At this point the device has been probed and the device's BARs are sized,
 *    but no resource allocations have been done. The SR-IOV BARs are sized
 *    based on the maximum number of VFs supported by the device and we need
 *    to increase that to new_size.
 *
 * 2. Later, when Linux actually assigns resources it tries to make the resource
 *    allocations for each PCI bus as compact as possible. As a part of that it
 *    sorts the BARs on a bus by their required alignment, which is calculated
 *    using pci_resource_alignment().
 *
 *    For IOV resources this goes:
 *    pci_resource_alignment()
 *        pci_sriov_resource_alignment()
 *            pcibios_sriov_resource_alignment()
 *                pnv_pci_iov_resource_alignment()
 *
 *    Our hook overrides the default alignment, equal to the per-vf-size, with
 *    new_size computed above.
 *
 * 3. When userspace enables VFs for a device:
 *
 *    sriov_enable()
 *        pcibios_sriov_enable()
 *            pnv_pcibios_sriov_enable()
 *
 *    This is where we actually allocate PE numbers for each VF and set up the
 *    MBT mapping for each SR-IOV BAR. In steps 1) and 2) we set up an "arena"
 *    where each MBT segment is equal in size to the VF BAR so we can shift
 *    around the actual SR-IOV BAR location within this arena. We need this
 *    ability because the PE space is shared by all devices on the same PHB.
 *    When using mode a) described above, segment 0 maps to PE#0, which might
 *    already be in use by another device on the PHB.
 *
 *    As a result we need to allocate a contiguous range of PE numbers, then
 *    shift the address programmed into the SR-IOV BAR of the PF so that the
 *    address of VF0 matches up with the segment corresponding to the first
 *    allocated PE number. This is handled in pnv_pci_vf_resource_shift().
 *
 *    Once all that is done we return to the PCI core which then enables VFs,
 *    scans them and creates pci_devs for each. The init process for a VF is
 *    largely the same as a normal device, but the VF is inserted into the IODA
 *    PE that we allocated for it rather than the PE associated with the bus.
 *
 * 4. When userspace disables VFs we unwind the above in
 *    pnv_pcibios_sriov_disable(). Fortunately this is relatively simple since
 *    we don't need to validate anything, just tear down the mappings and
 *    move the SR-IOV resource back to its "proper" location.
 *
 * That's how mode a) works. In theory mode b) (single PE mapping) is less work
 * since we can map each individual VF with a separate BAR. However, there's a
 * few limitations:
 *
 * 1) For IODA2 mode b) has a minimum alignment requirement of 32MB. This makes
 *    it only usable for devices with very large per-VF BARs. Such devices are
 *    similar to Big Foot. They definitely exist, but I've never seen one.
 *
 * 2) The number of MBT entries that we have is limited. PHB3 and PHB4 only
 *    have 16 in total and some are needed for other purposes. Most SR-IOV
 *    capable network cards can support more than 16 VFs on each port.
 *
 * We use b) when using a) would take up more than 1/4 of the entire 64bit
 * MMIO window of the PHB.
 *
 * PHB4 (IODA3) added a few new features that would be useful for SR-IOV. It
 * allowed the MBT to map 32bit MMIO space in addition to 64bit, which allows
 * us to support SR-IOV BARs in the 32bit MMIO window. This is useful since
 * the Linux BAR allocation will place any BAR marked as non-prefetchable into
 * the non-prefetchable bridge window, which is 32bit only. It also added two
 * new modes:
 *
 * c) A segmented BAR similar to a), but each segment can be individually
 *    mapped to any PE. This matches how the 32bit MMIO window worked on
 *    IODA1&2.
 *
 * d) A segmented BAR with 8, 64, or 128 segments. This works similarly to a),
 *    but with fewer segments and a configurable base PE, i.e. the n'th
 *    segment maps to the (n + base)'th PE.
 *
 *    The base PE is also required to be a multiple of the window size.
 *
 * Unfortunately, the OPAL API doesn't currently (as of skiboot v6.6) allow us
 * to exploit any of the IODA3 features.
 */
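/*
 * Worked example of mode a) (hypothetical numbers, not taken from any
 * particular device): assume a PHB with 256 PEs and a PF whose per-VF BAR
 * is 1MB and which supports up to 64 VFs.
 *
 *    default SR-IOV BAR:  64 * 1MB =  64MB, aligned to 1MB
 *    after steps 1 and 2: 256 * 1MB = 256MB, aligned to 256MB
 *
 * A single mode a) MBT entry then covers the 256MB range with 256 x 1MB
 * segments, segment n -> PE n. If the VFs are later given the contiguous
 * PE range 80..143, the PF's SR-IOV BAR address is shifted up by 80 segments
 * so that VF0's BAR lands in segment 80 (see pnv_pci_vf_resource_shift()).
 */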
static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
{
        struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
        struct resource *res;
        int i;
        resource_size_t vf_bar_sz;
        struct pnv_iov_data *iov;
        int mul;

        iov = kzalloc(sizeof(*iov), GFP_KERNEL);
        if (!iov)
                goto disable_iov;
        pdev->dev.archdata.iov_data = iov;
        mul = phb->ioda.total_pe_num;

        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = &pdev->resource[i + PCI_IOV_RESOURCES];
                if (!res->flags || res->parent)
                        continue;
                if (!pnv_pci_is_m64_flags(res->flags)) {
                        dev_warn(&pdev->dev, "Don't support SR-IOV with non M64 VF BAR%d: %pR. \n",
                                 i, res);
                        goto disable_iov;
                }

                vf_bar_sz = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);

                /*
                 * Generally, one segmented M64 BAR maps one IOV BAR. However,
                 * if a VF BAR is too large we end up wasting a lot of space.
                 * If each VF needs more than 1/4 of the default m64 segment
                 * then each VF BAR should be mapped in single-PE mode to reduce
                 * the amount of space required. This does however limit the
                 * number of VFs we can support.
                 *
                 * The 1/4 limit is arbitrary and can be tweaked.
                 */
                if (vf_bar_sz > (phb->ioda.m64_segsize >> 2)) {
                        /*
                         * On PHB3, the minimum size alignment of M64 BAR in
                         * single mode is 32MB. If this VF BAR is smaller than
                         * 32MB, but still too large for a segmented window
                         * then we can't map it and need to disable SR-IOV for
                         * this device.
                         */
                        if (vf_bar_sz < SZ_32M) {
                                pci_err(pdev, "VF BAR%d: %pR can't be mapped in single PE mode\n",
                                        i, res);
                                goto disable_iov;
                        }

                        iov->m64_single_mode[i] = true;
                        continue;
                }

                /*
                 * This BAR can be mapped with one segmented window, so adjust
                 * the resource size to accommodate.
                 */
                pci_dbg(pdev, " Fixing VF BAR%d: %pR to\n", i, res);
                res->end = res->start + vf_bar_sz * mul - 1;
                pci_dbg(pdev, " %pR\n", res);

                pci_info(pdev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
                         i, res, mul);

                iov->need_shift = true;
        }

        return;

disable_iov:
        /* Save ourselves some MMIO space by disabling the unusable BARs */
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = &pdev->resource[i + PCI_IOV_RESOURCES];
                res->flags = 0;
                res->end = res->start - 1;
        }

        pdev->dev.archdata.iov_data = NULL;
        kfree(iov);
}
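/*
 * Sketch of the mode selection above, with hypothetical sizes: if the PHB's
 * m64_segsize is 256MB the cut-off is 64MB. A 16MB per-VF BAR stays in
 * segmented mode and the IOV resource is grown to 16MB * total_pe_num. A
 * 128MB per-VF BAR is over the cut-off and at least 32MB, so it is flagged
 * for single PE mode and left at its native size. A per-VF BAR that is over
 * the cut-off but under 32MB (e.g. 24MB with a 64MB m64_segsize) can't be
 * mapped either way, so SR-IOV is disabled for the device.
 */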
void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
{
        if (pdev->is_virtfn) {
                struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);

                /*
                 * VF PEs are single-device PEs so their pdev pointer needs to
                 * be set. The pdev doesn't exist when the PE is allocated (in
                 * pcibios_sriov_enable()) so we fix it up here.
                 */
                pe->pdev = pdev;
                WARN_ON(!(pe->flags & PNV_IODA_PE_VF));
        } else if (pdev->is_physfn) {
                /*
                 * For PFs adjust their allocated IOV resources to match what
                 * the PHB can support using its M64 BAR table.
                 */
                pnv_pci_ioda_fixup_iov_resources(pdev);
        }
}

resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
                                               int resno)
{
        resource_size_t align = pci_iov_resource_size(pdev, resno);
        struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
        struct pnv_iov_data *iov = pnv_iov_get(pdev);

        /*
         * iov can be NULL if we have an SR-IOV device with an IOV BAR that
         * can't be placed in the m64 space (i.e. the BAR is 32bit or
         * non-prefetchable). In that case we don't allow VFs to be enabled
         * since one of their BARs would not be placed in the correct PE.
         */
        if (!iov)
                return align;

        /*
         * If we're using single mode then we can just use the native VF BAR
         * alignment. We validated that it's possible to use a single PE
         * window above when we did the fixup.
         */
        if (iov->m64_single_mode[resno - PCI_IOV_RESOURCES])
                return align;

        /*
         * On PowerNV the IOV BAR is mapped by an M64 BAR and, from the
         * hardware's point of view, the range covered by an M64 BAR must be
         * size-aligned. In shared (segmented) PE mode that window covers
         * total_pe_num segments of per-VF-BAR size, so report that as the
         * required alignment. Single PE mode was handled above and keeps the
         * native per-VF BAR alignment.
         */
        return phb->ioda.total_pe_num * align;
}

/* Disable and free every M64 window claimed for this device's VFs */
static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
{
        struct pnv_iov_data *iov;
        struct pnv_phb *phb;
        int window_id;

        phb = pci_bus_to_pnvhb(pdev->bus);
        iov = pnv_iov_get(pdev);

        for_each_set_bit(window_id, iov->used_m64_bar_mask, MAX_M64_BARS) {
                opal_pci_phb_mmio_enable(phb->opal_id,
                                         OPAL_M64_WINDOW_TYPE,
                                         window_id,
                                         0);

                clear_bit(window_id, &phb->ioda.m64_bar_alloc);
        }

        return 0;
}

/*
 * PHB3 and beyond support segmented windows. The window's address range
 * is subdivided into phb->ioda.total_pe_num segments and there's a 1-1
 * mapping between PEs and segments.
 */
static int64_t pnv_ioda_map_m64_segmented(struct pnv_phb *phb,
                                          int window_id,
                                          resource_size_t start,
                                          resource_size_t size)
{
        int64_t rc;

        rc = opal_pci_set_phb_mem_window(phb->opal_id,
                                         OPAL_M64_WINDOW_TYPE,
                                         window_id,
                                         start,
                                         0, /* unused */
                                         size);
        if (rc)
                goto out;

        rc = opal_pci_phb_mmio_enable(phb->opal_id,
                                      OPAL_M64_WINDOW_TYPE,
                                      window_id,
                                      OPAL_ENABLE_M64_SPLIT);
out:
        if (rc)
                pr_err("Failed to map M64 window #%d: %lld\n", window_id, rc);

        return rc;
}

static int64_t pnv_ioda_map_m64_single(struct pnv_phb *phb,
                                       int pe_num,
                                       int window_id,
                                       resource_size_t start,
                                       resource_size_t size)
{
        int64_t rc;

        /*
         * The API for setting up m64 mmio windows seems to have been designed
         * with P7-IOC in mind. For that chip each M64 BAR (window) had a fixed
         * split of 8 equally sized segments each of which could be
         * individually assigned to a PE.
         *
         * The problem with this is that the API doesn't have any way to
         * communicate the number of segments we want on a BAR. This wasn't
         * a problem for p7-ioc since you didn't have a choice, but the
         * single PE windows added in PHB3 don't map cleanly to this API.
         *
         * As a result we've got this slightly awkward process where we
         * call opal_pci_map_pe_mmio_window() to put the window into single
         * PE mode, and set the PE for the window before setting the address
         * bounds. We need to do it this way because the single PE windows
         * on PHB3 have different alignment requirements.
         */
        rc = opal_pci_map_pe_mmio_window(phb->opal_id,
                                         pe_num,
                                         OPAL_M64_WINDOW_TYPE,
                                         window_id,
                                         0);
        if (rc)
                goto out;

        /*
         * NB: In single PE mode the window needs to be aligned to 32MB
         */
        rc = opal_pci_set_phb_mem_window(phb->opal_id,
                                         OPAL_M64_WINDOW_TYPE,
                                         window_id,
                                         start,
                                         0, /* ignored by FW, m64 is 1-1 */
                                         size);
        if (rc)
                goto out;

        /*
         * Now actually enable it. We specified the BAR should be in
         * "non-split" mode so FW will validate that the BAR is in single
         * PE mode.
         */
        rc = opal_pci_phb_mmio_enable(phb->opal_id,
                                      OPAL_M64_WINDOW_TYPE,
                                      window_id,
                                      OPAL_ENABLE_M64_NON_SPLIT);
out:
        if (rc)
                pr_err("Error mapping single PE BAR\n");

        return rc;
}

/* Grab an unused M64 window (MBT entry) and mark it as used by this device */
static int pnv_pci_alloc_m64_bar(struct pnv_phb *phb, struct pnv_iov_data *iov)
{
        int win;

        do {
                win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
                                         phb->ioda.m64_bar_idx + 1, 0);

                if (win >= phb->ioda.m64_bar_idx + 1)
                        return -1;
        } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

        set_bit(win, iov->used_m64_bar_mask);

        return win;
}
static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
{
        struct pnv_iov_data *iov;
        struct pnv_phb *phb;
        int win;
        struct resource *res;
        int i, j;
        int64_t rc;
        resource_size_t size, start;
        int base_pe_num;

        phb = pci_bus_to_pnvhb(pdev->bus);
        iov = pnv_iov_get(pdev);

        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = &pdev->resource[i + PCI_IOV_RESOURCES];
                if (!res->flags || !res->parent)
                        continue;

                /* don't need single mode? map everything in one go! */
                if (!iov->m64_single_mode[i]) {
                        win = pnv_pci_alloc_m64_bar(phb, iov);
                        if (win < 0)
                                goto m64_failed;

                        size = resource_size(res);
                        start = res->start;

                        rc = pnv_ioda_map_m64_segmented(phb, win, start, size);
                        if (rc)
                                goto m64_failed;

                        continue;
                }

                /* otherwise map each VF with single PE BARs */
                size = pci_iov_resource_size(pdev, PCI_IOV_RESOURCES + i);
                base_pe_num = iov->vf_pe_arr[0].pe_number;

                for (j = 0; j < num_vfs; j++) {
                        win = pnv_pci_alloc_m64_bar(phb, iov);
                        if (win < 0)
                                goto m64_failed;

                        start = res->start + size * j;
                        rc = pnv_ioda_map_m64_single(phb,
                                                     base_pe_num + j,
                                                     win,
                                                     start,
                                                     size);
                        if (rc)
                                goto m64_failed;
                }
        }
        return 0;

m64_failed:
        pnv_pci_vf_release_m64(pdev, num_vfs);
        return -EBUSY;
}
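/*
 * Window accounting for the two paths above, with hypothetical numbers: in
 * segmented mode each IOV BAR consumes exactly one M64 window no matter how
 * many VFs are enabled. In single PE mode it is one window per VF per BAR,
 * so e.g. a device with two single-mode IOV BARs and 16 VFs would need 32
 * windows, more than the 16 MBT entries a PHB3/PHB4 has in total (some of
 * which are already in use). pnv_pci_alloc_m64_bar() then runs out and the
 * whole enable is unwound with -EBUSY.
 */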
static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
{
        struct pnv_phb *phb;
        struct pnv_ioda_pe *pe, *pe_n;

        phb = pci_bus_to_pnvhb(pdev->bus);

        if (!pdev->is_physfn)
                return;

        /* FIXME: Use pnv_ioda_release_pe()? */
        list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
                if (pe->parent_dev != pdev)
                        continue;

                pnv_pci_ioda2_release_pe_dma(pe);

                /* Remove from list */
                mutex_lock(&phb->ioda.pe_list_mutex);
                list_del(&pe->list);
                mutex_unlock(&phb->ioda.pe_list_mutex);

                pnv_ioda_deconfigure_pe(phb, pe);

                pnv_ioda_free_pe(pe);
        }
}

static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
{
        struct resource *res, res2;
        struct pnv_iov_data *iov;
        resource_size_t size;
        u16 num_vfs;
        int i;

        if (!dev->is_physfn)
                return -EINVAL;
        iov = pnv_iov_get(dev);

        /*
         * "offset" is in VFs. The M64 windows are sized so that when they
         * are segmented, each segment is the same size as the IOV BAR.
         * Each segment is in a separate PE, and the high order bits of the
         * address are the PE number. Therefore, each VF's BAR is in a
         * separate PE, and changing the IOV BAR start address changes the
         * range of PEs the VFs are in.
         */
        num_vfs = iov->num_vfs;
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = &dev->resource[i + PCI_IOV_RESOURCES];
                if (!res->flags || !res->parent)
                        continue;
                if (iov->m64_single_mode[i])
                        continue;

                /*
                 * The actual IOV BAR range is determined by the start address
                 * and the size needed for num_vfs VF BARs. This check makes
                 * sure that after shifting, the range will not overlap with
                 * another device.
                 */
                size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
                res2.flags = res->flags;
                res2.start = res->start + (size * offset);
                res2.end = res2.start + (size * num_vfs) - 1;

                if (res2.end > res->end) {
                        dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
                                i, &res2, res, num_vfs, offset);
                        return -EBUSY;
                }
        }

        /*
         * Since the M64 BAR shares segments among all possible 256 PEs,
         * we have to shift the beginning of the PF IOV BAR so that it starts
         * from the segment which belongs to the PE number assigned to the
         * first VF. This creates a "hole" in /proc/iomem which could otherwise
         * be used for allocating other resources, so we reserve this area
         * below and release it when IOV is released.
         */
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = &dev->resource[i + PCI_IOV_RESOURCES];
                if (!res->flags || !res->parent)
                        continue;
                if (iov->m64_single_mode[i])
                        continue;

                size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
                res2 = *res;
                res->start += size * offset;

                dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
                         i, &res2, res, (offset > 0) ? "En" : "Dis",
                         num_vfs, offset);

                if (offset < 0) {
                        devm_release_resource(&dev->dev, &iov->holes[i]);
                        memset(&iov->holes[i], 0, sizeof(iov->holes[i]));
                }

                pci_update_resource(dev, i + PCI_IOV_RESOURCES);

                if (offset > 0) {
                        iov->holes[i].start = res2.start;
                        iov->holes[i].end = res2.start + size * offset - 1;
                        iov->holes[i].flags = IORESOURCE_BUS;
                        iov->holes[i].name = "pnv_iov_reserved";
                        devm_request_resource(&dev->dev, res->parent,
                                              &iov->holes[i]);
                }
        }
        return 0;
}
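/*
 * Worked example of the shift above (hypothetical numbers): with a 1MB
 * per-VF BAR the segmented window has 1MB segments, one per PE. Say the
 * IOV BAR was assigned at 0x100000000 (segment/PE 0) and the 8 VFs being
 * enabled were given the contiguous PE range starting at PE#40, i.e.
 * offset = 40. res->start is moved up by 40 * 1MB to 0x102800000 so that
 * VF0's BAR falls in segment 40 (PE#40), and the 40MB gap from 0x100000000
 * to 0x1027fffff is reserved as "pnv_iov_reserved" until the VFs are
 * disabled and the shift is undone with a negative offset.
 */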
static void pnv_pci_sriov_disable(struct pci_dev *pdev)
{
        u16 num_vfs, base_pe;
        struct pnv_iov_data *iov;

        iov = pnv_iov_get(pdev);
        if (WARN_ON(!iov))
                return;

        num_vfs = iov->num_vfs;
        base_pe = iov->vf_pe_arr[0].pe_number;

        /* Release VF PEs */
        pnv_ioda_release_vf_PE(pdev);

        /* Un-shift the IOV BARs if we need to */
        if (iov->need_shift)
                pnv_pci_vf_resource_shift(pdev, -base_pe);

        /* Release M64 windows */
        pnv_pci_vf_release_m64(pdev, num_vfs);
}

static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
        struct pnv_phb *phb;
        struct pnv_ioda_pe *pe;
        int pe_num;
        u16 vf_index;
        struct pnv_iov_data *iov;
        struct pci_dn *pdn;

        if (!pdev->is_physfn)
                return;

        phb = pci_bus_to_pnvhb(pdev->bus);
        pdn = pci_get_pdn(pdev);
        iov = pnv_iov_get(pdev);

        /* Reserve PE for each VF */
        for (vf_index = 0; vf_index < num_vfs; vf_index++) {
                int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
                int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
                struct pci_dn *vf_pdn;

                pe = &iov->vf_pe_arr[vf_index];
                pe->phb = phb;
                pe->flags = PNV_IODA_PE_VF;
                pe->pbus = NULL;
                pe->parent_dev = pdev;
                pe->mve_number = -1;
                pe->rid = (vf_bus << 8) | vf_devfn;

                pe_num = pe->pe_number;
                pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
                        pci_domain_nr(pdev->bus), pdev->bus->number,
                        PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);

                if (pnv_ioda_configure_pe(phb, pe)) {
                        /* XXX What do we do here ? */
                        pnv_ioda_free_pe(pe);
                        pe->pdev = NULL;
                        continue;
                }

                /* Put PE to the list */
                mutex_lock(&phb->ioda.pe_list_mutex);
                list_add_tail(&pe->list, &phb->ioda.pe_list);
                mutex_unlock(&phb->ioda.pe_list_mutex);

                /* associate this pe to its pdn */
                list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
                        if (vf_pdn->busno == vf_bus &&
                            vf_pdn->devfn == vf_devfn) {
                                vf_pdn->pe_number = pe_num;
                                break;
                        }
                }

                pnv_pci_ioda2_setup_dma_pe(phb, pe);
        }
}

static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
        struct pnv_ioda_pe *base_pe;
        struct pnv_iov_data *iov;
        struct pnv_phb *phb;
        int ret;
        u16 i;

        phb = pci_bus_to_pnvhb(pdev->bus);
        iov = pnv_iov_get(pdev);

        /*
         * There are calls to the IODA2 PE setup code littered throughout. We
         * could probably fix that, but we'd still have problems due to the
         * restrictions inherent to IODA1 PHBs.
         *
         * NB: We class IODA3 as IODA2 since they're very similar.
         */
        if (phb->type != PNV_PHB_IODA2) {
                pci_err(pdev, "SR-IOV is not supported on this PHB\n");
                return -ENXIO;
        }

        if (!iov) {
                dev_info(&pdev->dev, "don't support this SRIOV device with non 64bit-prefetchable IOV BAR\n");
                return -ENOSPC;
        }

        /* allocate a contiguous block of PEs for our VFs */
        base_pe = pnv_ioda_alloc_pe(phb, num_vfs);
        if (!base_pe) {
                pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs);
                return -EBUSY;
        }

        iov->vf_pe_arr = base_pe;
        iov->num_vfs = num_vfs;

        /* Assign M64 window accordingly */
        ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
        if (ret) {
                dev_info(&pdev->dev, "Not enough M64 window resources\n");
                goto m64_failed;
        }

        /*
         * When using one M64 BAR to map one IOV BAR, we need to shift
         * the IOV BAR according to the PE# allocated to the VFs.
         * Otherwise, the PE# for the VF will conflict with others.
         */
        if (iov->need_shift) {
                ret = pnv_pci_vf_resource_shift(pdev, base_pe->pe_number);
                if (ret)
                        goto shift_failed;
        }

        /* Setup VF PEs */
        pnv_ioda_setup_vf_PE(pdev, num_vfs);

        return 0;

shift_failed:
        pnv_pci_vf_release_m64(pdev, num_vfs);

m64_failed:
        for (i = 0; i < num_vfs; i++)
                pnv_ioda_free_pe(&iov->vf_pe_arr[i]);

        return ret;
}

int pnv_pcibios_sriov_disable(struct pci_dev *pdev)
{
        pnv_pci_sriov_disable(pdev);

        /* Release PCI data */
        remove_sriov_vf_pdns(pdev);
        return 0;
}
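/*
 * Entry point for VF enablement. As described in the comment at the top of
 * this file, a userspace write such as
 *
 *    echo 8 > /sys/bus/pci/devices/<PF>/sriov_numvfs
 *
 * goes through sriov_enable() and pcibios_sriov_enable() in the PCI core
 * before landing here, which allocates the per-VF pci_dn data and then does
 * the PE and M64 window setup in pnv_pci_sriov_enable().
 */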
int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
        /* Allocate PCI data */
        add_sriov_vf_pdns(pdev);

        return pnv_pci_sriov_enable(pdev, num_vfs);
}