Path: blob/main/sys/compat/linuxkpi/common/src/linux_pci.c
103032 views
/*-1* Copyright (c) 2015-2016 Mellanox Technologies, Ltd.2* All rights reserved.3* Copyright (c) 2020-2025 The FreeBSD Foundation4*5* Portions of this software were developed by Björn Zeeb6* under sponsorship from the FreeBSD Foundation.7*8* Redistribution and use in source and binary forms, with or without9* modification, are permitted provided that the following conditions10* are met:11* 1. Redistributions of source code must retain the above copyright12* notice unmodified, this list of conditions, and the following13* disclaimer.14* 2. Redistributions in binary form must reproduce the above copyright15* notice, this list of conditions and the following disclaimer in the16* documentation and/or other materials provided with the distribution.17*18* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR19* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES20* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.21* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,22* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT23* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,24* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY25* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT26* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF27* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.28*/2930#include <sys/param.h>31#include <sys/systm.h>32#include <sys/bus.h>33#include <sys/malloc.h>34#include <sys/kernel.h>35#include <sys/sysctl.h>36#include <sys/lock.h>37#include <sys/mutex.h>38#include <sys/fcntl.h>39#include <sys/file.h>40#include <sys/filio.h>41#include <sys/pciio.h>42#include <sys/pctrie.h>43#include <sys/rman.h>44#include <sys/rwlock.h>45#include <sys/stdarg.h>4647#include <vm/vm.h>48#include <vm/pmap.h>4950#include <machine/bus.h>51#include <machine/resource.h>5253#include 
<dev/pci/pcivar.h>54#include <dev/pci/pci_private.h>55#include <dev/pci/pci_iov.h>56#include <dev/backlight/backlight.h>5758#include <linux/kernel.h>59#include <linux/kobject.h>60#include <linux/device.h>61#include <linux/slab.h>62#include <linux/module.h>63#include <linux/cdev.h>64#include <linux/file.h>65#include <linux/sysfs.h>66#include <linux/mm.h>67#include <linux/io.h>68#include <linux/vmalloc.h>69#define WANT_NATIVE_PCI_GET_SLOT70#include <linux/pci.h>71#include <linux/compat.h>7273#include <linux/backlight.h>7475#include "backlight_if.h"76#include "pcib_if.h"7778/* Undef the linux function macro defined in linux/pci.h */79#undef pci_get_class8081extern int linuxkpi_debug;8283SYSCTL_DECL(_compat_linuxkpi);8485static counter_u64_t lkpi_pci_nseg1_fail;86SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,87&lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment");8889static device_probe_t linux_pci_probe;90static device_attach_t linux_pci_attach;91static device_detach_t linux_pci_detach;92static device_suspend_t linux_pci_suspend;93static device_resume_t linux_pci_resume;94static device_shutdown_t linux_pci_shutdown;95static pci_iov_init_t linux_pci_iov_init;96static pci_iov_uninit_t linux_pci_iov_uninit;97static pci_iov_add_vf_t linux_pci_iov_add_vf;98static int linux_backlight_get_status(device_t dev, struct backlight_props *props);99static int linux_backlight_update_status(device_t dev, struct backlight_props *props);100static int linux_backlight_get_info(device_t dev, struct backlight_info *info);101static void lkpi_pcim_iomap_table_release(struct device *, void *);102103static device_method_t pci_methods[] = {104DEVMETHOD(device_probe, linux_pci_probe),105DEVMETHOD(device_attach, linux_pci_attach),106DEVMETHOD(device_detach, linux_pci_detach),107DEVMETHOD(device_suspend, linux_pci_suspend),108DEVMETHOD(device_resume, linux_pci_resume),109DEVMETHOD(device_shutdown, linux_pci_shutdown),110DEVMETHOD(pci_iov_init, 
linux_pci_iov_init),111DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),112DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),113114/* Bus interface. */115DEVMETHOD(bus_add_child, bus_generic_add_child),116117/* backlight interface */118DEVMETHOD(backlight_update_status, linux_backlight_update_status),119DEVMETHOD(backlight_get_status, linux_backlight_get_status),120DEVMETHOD(backlight_get_info, linux_backlight_get_info),121DEVMETHOD_END122};123124const char *pci_power_names[] = {125"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"126};127128/* We need some meta-struct to keep track of these for devres. */129struct pci_devres {130bool enable_io;131/* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */132uint8_t region_mask;133struct resource *region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */134};135struct pcim_iomap_devres {136void *mmio_table[PCIR_MAX_BAR_0 + 1];137struct resource *res_table[PCIR_MAX_BAR_0 + 1];138};139140struct linux_dma_priv {141uint64_t dma_mask;142bus_dma_tag_t dmat;143uint64_t dma_coherent_mask;144bus_dma_tag_t dmat_coherent;145struct mtx lock;146struct pctrie ptree;147};148#define DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)149#define DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)150151static void152lkpi_set_pcim_iomap_devres(struct pcim_iomap_devres *dr, int bar,153void *res)154{155dr->mmio_table[bar] = (void *)rman_get_bushandle(res);156dr->res_table[bar] = res;157}158159static bool160lkpi_pci_bar_id_valid(int bar)161{162if (bar < 0 || bar > PCIR_MAX_BAR_0)163return (false);164165return (true);166}167168static int169linux_pdev_dma_uninit(struct pci_dev *pdev)170{171struct linux_dma_priv *priv;172173priv = pdev->dev.dma_priv;174if (priv->dmat)175bus_dma_tag_destroy(priv->dmat);176if (priv->dmat_coherent)177bus_dma_tag_destroy(priv->dmat_coherent);178mtx_destroy(&priv->lock);179pdev->dev.dma_priv = NULL;180free(priv, M_DEVBUF);181return (0);182}183184static int185linux_pdev_dma_init(struct pci_dev *pdev)186{187struct linux_dma_priv *priv;188int 
error;189190priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);191192mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);193pctrie_init(&priv->ptree);194195pdev->dev.dma_priv = priv;196197/* Create a default DMA tags. */198error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));199if (error != 0)200goto err;201/* Coherent is lower 32bit only by default in Linux. */202error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));203if (error != 0)204goto err;205206return (error);207208err:209linux_pdev_dma_uninit(pdev);210return (error);211}212213int214linux_dma_tag_init(struct device *dev, u64 dma_mask)215{216struct linux_dma_priv *priv;217int error;218219priv = dev->dma_priv;220221if (priv->dmat) {222if (priv->dma_mask == dma_mask)223return (0);224225bus_dma_tag_destroy(priv->dmat);226}227228priv->dma_mask = dma_mask;229230error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),2311, 0, /* alignment, boundary */232dma_mask, /* lowaddr */233BUS_SPACE_MAXADDR, /* highaddr */234NULL, NULL, /* filtfunc, filtfuncarg */235BUS_SPACE_MAXSIZE, /* maxsize */2361, /* nsegments */237BUS_SPACE_MAXSIZE, /* maxsegsz */2380, /* flags */239NULL, NULL, /* lockfunc, lockfuncarg */240&priv->dmat);241return (-error);242}243244int245linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)246{247struct linux_dma_priv *priv;248int error;249250priv = dev->dma_priv;251252if (priv->dmat_coherent) {253if (priv->dma_coherent_mask == dma_mask)254return (0);255256bus_dma_tag_destroy(priv->dmat_coherent);257}258259priv->dma_coherent_mask = dma_mask;260261error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),2621, 0, /* alignment, boundary */263dma_mask, /* lowaddr */264BUS_SPACE_MAXADDR, /* highaddr */265NULL, NULL, /* filtfunc, filtfuncarg */266BUS_SPACE_MAXSIZE, /* maxsize */2671, /* nsegments */268BUS_SPACE_MAXSIZE, /* maxsegsz */2690, /* flags */270NULL, NULL, /* lockfunc, lockfuncarg */271&priv->dmat_coherent);272return (-error);273}274275static struct pci_driver 
*276linux_pci_find(device_t dev, const struct pci_device_id **idp)277{278const struct pci_device_id *id;279struct pci_driver *pdrv;280uint16_t vendor;281uint16_t device;282uint16_t subvendor;283uint16_t subdevice;284285vendor = pci_get_vendor(dev);286device = pci_get_device(dev);287subvendor = pci_get_subvendor(dev);288subdevice = pci_get_subdevice(dev);289290spin_lock(&pci_lock);291list_for_each_entry(pdrv, &pci_drivers, node) {292for (id = pdrv->id_table; id->vendor != 0; id++) {293if (vendor == id->vendor &&294(PCI_ANY_ID == id->device || device == id->device) &&295(PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&296(PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {297*idp = id;298spin_unlock(&pci_lock);299return (pdrv);300}301}302}303spin_unlock(&pci_lock);304return (NULL);305}306307struct pci_dev *308lkpi_pci_get_device(uint32_t vendor, uint32_t device, struct pci_dev *odev)309{310struct pci_dev *pdev, *found;311312found = NULL;313spin_lock(&pci_lock);314list_for_each_entry(pdev, &pci_devices, links) {315/* Walk until we find odev. 
*/316if (odev != NULL) {317if (pdev == odev)318odev = NULL;319continue;320}321322if ((pdev->vendor == vendor || vendor == PCI_ANY_ID) &&323(pdev->device == device || device == PCI_ANY_ID)) {324found = pdev;325break;326}327}328pci_dev_get(found);329spin_unlock(&pci_lock);330331return (found);332}333334static void335lkpi_pci_dev_release(struct device *dev)336{337338lkpi_devres_release_free_list(dev);339spin_lock_destroy(&dev->devres_lock);340}341342static int343lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)344{345struct pci_devinfo *dinfo;346int error;347348error = kobject_init_and_add(&pdev->dev.kobj, &linux_dev_ktype,349&linux_root_device.kobj, device_get_nameunit(dev));350if (error != 0) {351printf("%s:%d: kobject_init_and_add returned %d\n",352__func__, __LINE__, error);353return (error);354}355356pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));357pdev->vendor = pci_get_vendor(dev);358pdev->device = pci_get_device(dev);359pdev->subsystem_vendor = pci_get_subvendor(dev);360pdev->subsystem_device = pci_get_subdevice(dev);361pdev->class = pci_get_class(dev);362pdev->revision = pci_get_revid(dev);363pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d",364pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),365pci_get_function(dev));366367pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);368pdev->bus->number = pci_get_bus(dev);369pdev->bus->domain = pci_get_domain(dev);370371/* Check if we have reached the root to satisfy pci_is_root_bus() */372dinfo = device_get_ivars(dev);373if (dinfo->cfg.pcie.pcie_location != 0 &&374dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) {375pdev->bus->self = NULL;376} else {377/*378* This should be the upstream bridge; pci_upstream_bridge()379* handles that case on demand as otherwise we'll shadow the380* entire PCI hierarchy.381*/382pdev->bus->self = pdev;383}384pdev->dev.bsddev = dev;385pdev->dev.parent = &linux_root_device;386pdev->dev.release = lkpi_pci_dev_release;387388if 
(pci_msi_count(dev) > 0)
		/* Shadow table of MSI descriptors, one slot per message. */
		pdev->msi_desc = malloc(pci_msi_count(dev) *
		    sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO);

	/* Per-device state: BAR mappings, caps lock, devres and IRQ lists. */
	TAILQ_INIT(&pdev->mmio);
	spin_lock_init(&pdev->pcie_cap_lock);
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
	INIT_LIST_HEAD(&pdev->dev.irqents);

	return (0);
}

/*
 * Release callback for pci_dev objects created with lkpinew_pci_dev():
 * drop the references held on the root port and the upstream bridge,
 * free the MSI descriptor table (each entry, then the array), the shadow
 * bus, the path name and finally the pci_dev itself.
 */
static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	/* bus->self == pdev means no separate bridge reference was taken. */
	if (pdev->bus->self != pdev && pdev->bus->self != NULL)
		pci_dev_put(pdev->bus->self);
	free(pdev->bus, M_DEVBUF);
	if (pdev->msi_desc != NULL) {
		for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--)
			free(pdev->msi_desc[i], M_DEVBUF);
		free(pdev->msi_desc, M_DEVBUF);
	}
	kfree(pdev->path_name);
	free(pdev, M_DEVBUF);
}

/*
 * Allocate and populate a shadow struct pci_dev for an arbitrary FreeBSD
 * PCI device_t (one not attached through a LinuxKPI driver).  Returns
 * NULL if lkpifill_pci_dev() fails.  The object is torn down through
 * lkpinew_pci_dev_release() when its last reference goes away.
 */
struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;
	int error;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
	error = lkpifill_pci_dev(dev, pdev);
	if (error != 0) {
		free(pdev, M_DEVBUF);
		return (NULL);
	}
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

/*
 * Linux pci_get_class(): find the next device after "from" matching the
 * given class code.  Only the base class (bits 23-16) and sub class
 * (bits 15-8) are passed to the lookup; the prog-if byte is not used.
 * Returns a new shadow pci_dev, or NULL if nothing matches.
 */
struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

/*
 * Like lkpi_pci_get_class() but matches on the base class only.
 */
struct pci_dev *
lkpi_pci_get_base_class(unsigned int baseclass, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_base_class_from(baseclass, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

/* Linux pci_get_domain_bus_and_slot(): look up by domain/bus/devfn. */
struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int 
domain, unsigned int bus,477unsigned int devfn)478{479device_t dev;480struct pci_dev *pdev;481482dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));483if (dev == NULL)484return (NULL);485486pdev = lkpinew_pci_dev(dev);487return (pdev);488}489490struct pci_dev *491lkpi_pci_get_slot(struct pci_bus *pbus, unsigned int devfn)492{493device_t dev;494struct pci_dev *pdev;495496dev = pci_find_bsf(pbus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));497if (dev == NULL)498return (NULL);499500pdev = lkpinew_pci_dev(dev);501return (pdev);502}503504static int505linux_pci_probe(device_t dev)506{507const struct pci_device_id *id;508struct pci_driver *pdrv;509510if ((pdrv = linux_pci_find(dev, &id)) == NULL)511return (ENXIO);512if (device_get_driver(dev) != &pdrv->bsddriver)513return (ENXIO);514device_set_desc(dev, pdrv->name);515516/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */517if (pdrv->bsd_probe_return == 0)518return (BUS_PROBE_DEFAULT);519else520return (pdrv->bsd_probe_return);521}522523static int524linux_pci_attach(device_t dev)525{526const struct pci_device_id *id;527struct pci_driver *pdrv;528struct pci_dev *pdev;529530pdrv = linux_pci_find(dev, &id);531pdev = device_get_softc(dev);532533MPASS(pdrv != NULL);534MPASS(pdev != NULL);535536return (linux_pci_attach_device(dev, pdrv, id, pdev));537}538539static struct resource_list_entry *540linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,541int type, int rid)542{543device_t dev;544struct resource *res;545546KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,547("trying to reserve non-BAR type %d", type));548549dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?550device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;551res = pci_reserve_map(device_get_parent(dev), dev, type, rid, 0, ~0,5521, 1, 0);553if (res == NULL)554return (NULL);555return (resource_list_find(rl, type, rid));556}557558static struct resource_list_entry *559linux_pci_get_rle(struct pci_dev *pdev, int 
type, int rid, bool reserve_bar)560{561struct pci_devinfo *dinfo;562struct resource_list *rl;563struct resource_list_entry *rle;564565dinfo = device_get_ivars(pdev->dev.bsddev);566rl = &dinfo->resources;567rle = resource_list_find(rl, type, rid);568/* Reserve resources for this BAR if needed. */569if (rle == NULL && reserve_bar)570rle = linux_pci_reserve_bar(pdev, rl, type, rid);571return (rle);572}573574int575linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,576const struct pci_device_id *id, struct pci_dev *pdev)577{578struct resource_list_entry *rle;579device_t parent;580struct pci_dev *pbus, *ppbus;581uintptr_t rid;582int error;583bool isdrm;584585linux_set_current(curthread);586587parent = device_get_parent(dev);588isdrm = pdrv != NULL && pdrv->isdrm;589590if (isdrm) {591struct pci_devinfo *dinfo;592593dinfo = device_get_ivars(parent);594device_set_ivars(dev, dinfo);595}596597error = lkpifill_pci_dev(dev, pdev);598if (error != 0)599return (error);600601if (isdrm)602PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);603else604PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);605pdev->devfn = rid;606pdev->pdrv = pdrv;607rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);608if (rle != NULL)609pdev->dev.irq = rle->start;610else611pdev->dev.irq = LINUX_IRQ_INVALID;612pdev->irq = pdev->dev.irq;613error = linux_pdev_dma_init(pdev);614if (error)615goto out_dma_init;616617spin_lock(&pci_lock);618list_add(&pdev->links, &pci_devices);619spin_unlock(&pci_lock);620621/*622* Create the hierarchy now as we cannot on demand later.623* Take special care of DRM as there is a non-PCI device in the chain.624*/625pbus = pdev;626if (isdrm) {627pbus = lkpinew_pci_dev(parent);628if (pbus == NULL) {629error = ENXIO;630goto out_dma_init;631}632}633pcie_find_root_port(pbus);634if (isdrm)635pdev->root = pbus->root;636ppbus = pci_upstream_bridge(pbus);637while (ppbus != NULL && ppbus != pbus) {638pbus = ppbus;639ppbus = pci_upstream_bridge(pbus);640}641642if (pdrv != 
NULL) {643error = pdrv->probe(pdev, id);644if (error)645goto out_probe;646}647return (0);648649/* XXX the cleanup does not match the allocation up there. */650out_probe:651free(pdev->bus, M_DEVBUF);652spin_lock_destroy(&pdev->pcie_cap_lock);653linux_pdev_dma_uninit(pdev);654out_dma_init:655spin_lock(&pci_lock);656list_del(&pdev->links);657spin_unlock(&pci_lock);658put_device(&pdev->dev);659return (-error);660}661662static int663linux_pci_detach(device_t dev)664{665struct pci_dev *pdev;666667pdev = device_get_softc(dev);668669MPASS(pdev != NULL);670671device_set_desc(dev, NULL);672673return (linux_pci_detach_device(pdev));674}675676int677linux_pci_detach_device(struct pci_dev *pdev)678{679680linux_set_current(curthread);681682if (pdev->pdrv != NULL)683pdev->pdrv->remove(pdev);684685if (pdev->root != NULL)686pci_dev_put(pdev->root);687free(pdev->bus, M_DEVBUF);688linux_pdev_dma_uninit(pdev);689690spin_lock(&pci_lock);691list_del(&pdev->links);692spin_unlock(&pci_lock);693spin_lock_destroy(&pdev->pcie_cap_lock);694put_device(&pdev->dev);695696return (0);697}698699static int700lkpi_pci_disable_dev(struct device *dev)701{702703(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);704(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);705return (0);706}707708static struct pci_devres *709lkpi_pci_devres_get_alloc(struct pci_dev *pdev)710{711struct pci_devres *dr;712713dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);714if (dr == NULL) {715dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),716GFP_KERNEL | __GFP_ZERO);717if (dr != NULL)718lkpi_devres_add(&pdev->dev, dr);719}720721return (dr);722}723724static struct pci_devres *725lkpi_pci_devres_find(struct pci_dev *pdev)726{727if (!pdev->managed)728return (NULL);729730return (lkpi_pci_devres_get_alloc(pdev));731}732733void734lkpi_pci_devres_release(struct device *dev, void *p)735{736struct pci_devres *dr;737struct pci_dev *pdev;738int bar;739740pdev = to_pci_dev(dev);741dr = p;742743if 
(pdev->msix_enabled)744lkpi_pci_disable_msix(pdev);745if (pdev->msi_enabled)746lkpi_pci_disable_msi(pdev);747748if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)749dr->enable_io = false;750751if (dr->region_mask == 0)752return;753for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {754755if ((dr->region_mask & (1 << bar)) == 0)756continue;757pci_release_region(pdev, bar);758}759}760761int762linuxkpi_pcim_enable_device(struct pci_dev *pdev)763{764struct pci_devres *dr;765int error;766767/* Here we cannot run through the pdev->managed check. */768dr = lkpi_pci_devres_get_alloc(pdev);769if (dr == NULL)770return (-ENOMEM);771772/* If resources were enabled before do not do it again. */773if (dr->enable_io)774return (0);775776error = pci_enable_device(pdev);777if (error == 0)778dr->enable_io = true;779780/* This device is not managed. */781pdev->managed = true;782783return (error);784}785786static struct pcim_iomap_devres *787lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)788{789struct pcim_iomap_devres *dr;790791dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,792NULL, NULL);793if (dr == NULL) {794dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,795sizeof(*dr), GFP_KERNEL | __GFP_ZERO);796if (dr != NULL)797lkpi_devres_add(&pdev->dev, dr);798}799800if (dr == NULL)801device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);802803return (dr);804}805806void __iomem **807linuxkpi_pcim_iomap_table(struct pci_dev *pdev)808{809struct pcim_iomap_devres *dr;810811dr = lkpi_pcim_iomap_devres_find(pdev);812if (dr == NULL)813return (NULL);814815/*816* If the driver has manually set a flag to be able to request the817* resource to use bus_read/write_<n>, return the shadow table.818*/819if (pdev->want_iomap_res)820return ((void **)dr->res_table);821822/* This is the Linux default. 
*/823return (dr->mmio_table);824}825826static struct resource *827_lkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen __unused)828{829struct pci_mmio_region *mmio, *p;830int type;831832if (!lkpi_pci_bar_id_valid(bar))833return (NULL);834835type = pci_resource_type(pdev, bar);836if (type < 0) {837device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",838__func__, bar, type);839return (NULL);840}841842/*843* Check for duplicate mappings.844* This can happen if a driver calls pci_request_region() first.845*/846TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {847if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {848return (mmio->res);849}850}851852mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);853mmio->rid = PCIR_BAR(bar);854mmio->type = type;855mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,856&mmio->rid, RF_ACTIVE|RF_SHAREABLE);857if (mmio->res == NULL) {858device_printf(pdev->dev.bsddev, "%s: failed to alloc "859"bar %d type %d rid %d\n",860__func__, bar, type, PCIR_BAR(bar));861free(mmio, M_DEVBUF);862return (NULL);863}864TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);865866return (mmio->res);867}868869void *870linuxkpi_pci_iomap_range(struct pci_dev *pdev, int bar,871unsigned long off, unsigned long maxlen)872{873struct resource *res;874875if (!lkpi_pci_bar_id_valid(bar))876return (NULL);877878res = _lkpi_pci_iomap(pdev, bar, maxlen);879if (res == NULL)880return (NULL);881/* This is a FreeBSD extension so we can use bus_*(). 
*/882if (pdev->want_iomap_res)883return (res);884MPASS(off < rman_get_size(res));885return ((void *)(rman_get_bushandle(res) + off));886}887888void *889linuxkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)890{891if (!lkpi_pci_bar_id_valid(bar))892return (NULL);893894return (linuxkpi_pci_iomap_range(pdev, bar, 0, maxlen));895}896897void *898linuxkpi_pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)899{900struct pcim_iomap_devres *dr;901void *res;902903if (!lkpi_pci_bar_id_valid(bar))904return (NULL);905906dr = lkpi_pcim_iomap_devres_find(pdev);907if (dr == NULL)908return (NULL);909910if (dr->res_table[bar] != NULL)911return (dr->res_table[bar]);912913res = linuxkpi_pci_iomap(pdev, bar, maxlen);914if (res == NULL) {915/*916* Do not free the devres in case there were917* other valid mappings before already.918*/919return (NULL);920}921lkpi_set_pcim_iomap_devres(dr, bar, res);922923return (res);924}925926void927linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)928{929struct pci_mmio_region *mmio, *p;930bus_space_handle_t bh = (bus_space_handle_t)res;931932TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {933if (pdev->want_iomap_res) {934if (res != mmio->res)935continue;936} else {937if (bh < rman_get_bushandle(mmio->res) ||938bh >= rman_get_bushandle(mmio->res) +939rman_get_size(mmio->res))940continue;941}942bus_release_resource(pdev->dev.bsddev,943mmio->type, mmio->rid, mmio->res);944TAILQ_REMOVE(&pdev->mmio, mmio, next);945free(mmio, M_DEVBUF);946return;947}948}949950int951linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)952{953struct pcim_iomap_devres *dr;954void *res;955uint32_t mappings;956int bar;957958dr = lkpi_pcim_iomap_devres_find(pdev);959if (dr == NULL)960return (-ENOMEM);961962/* Now iomap all the requested (by "mask") ones. */963for (bar = mappings = 0; mappings != mask; bar++) {964if ((mask & (1 << bar)) == 0)965continue;966967/* Request double is not allowed. 
*/968if (dr->mmio_table[bar] != NULL) {969device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",970__func__, bar, dr->mmio_table[bar]);971goto err;972}973974res = _lkpi_pci_iomap(pdev, bar, 0);975if (res == NULL)976goto err;977lkpi_set_pcim_iomap_devres(dr, bar, res);978979mappings |= (1 << bar);980}981982return (0);983err:984for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {985if ((mappings & (1 << bar)) != 0) {986res = dr->mmio_table[bar];987if (res == NULL)988continue;989pci_iounmap(pdev, res);990}991}992993return (-EINVAL);994}995996static void997lkpi_pcim_iomap_table_release(struct device *dev, void *p)998{999struct pcim_iomap_devres *dr;1000struct pci_dev *pdev;1001int bar;10021003dr = p;1004pdev = to_pci_dev(dev);1005for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {10061007if (dr->mmio_table[bar] == NULL)1008continue;10091010pci_iounmap(pdev, dr->mmio_table[bar]);1011}1012}10131014static int1015linux_pci_suspend(device_t dev)1016{1017const struct dev_pm_ops *pmops;1018struct pm_message pm = { };1019struct pci_dev *pdev;1020int error;10211022error = 0;1023linux_set_current(curthread);1024pdev = device_get_softc(dev);1025pmops = pdev->pdrv->driver.pm;10261027if (pdev->pdrv->suspend != NULL)1028error = -pdev->pdrv->suspend(pdev, pm);1029else if (pmops != NULL && pmops->suspend != NULL) {1030error = -pmops->suspend(&pdev->dev);1031if (error == 0 && pmops->suspend_late != NULL)1032error = -pmops->suspend_late(&pdev->dev);1033if (error == 0 && pmops->suspend_noirq != NULL)1034error = -pmops->suspend_noirq(&pdev->dev);1035}1036return (error);1037}10381039static int1040linux_pci_resume(device_t dev)1041{1042const struct dev_pm_ops *pmops;1043struct pci_dev *pdev;1044int error;10451046error = 0;1047linux_set_current(curthread);1048pdev = device_get_softc(dev);1049pmops = pdev->pdrv->driver.pm;10501051if (pdev->pdrv->resume != NULL)1052error = -pdev->pdrv->resume(pdev);1053else if (pmops != NULL && pmops->resume != NULL) {1054if (pmops->resume_early != NULL)1055error = 
-pmops->resume_early(&pdev->dev);1056if (error == 0 && pmops->resume != NULL)1057error = -pmops->resume(&pdev->dev);1058}1059return (error);1060}10611062static int1063linux_pci_shutdown(device_t dev)1064{1065struct pci_dev *pdev;10661067linux_set_current(curthread);1068pdev = device_get_softc(dev);1069if (pdev->pdrv->shutdown != NULL)1070pdev->pdrv->shutdown(pdev);1071return (0);1072}10731074static int1075linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)1076{1077struct pci_dev *pdev;1078int error;10791080linux_set_current(curthread);1081pdev = device_get_softc(dev);1082if (pdev->pdrv->bsd_iov_init != NULL)1083error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);1084else1085error = EINVAL;1086return (error);1087}10881089static void1090linux_pci_iov_uninit(device_t dev)1091{1092struct pci_dev *pdev;10931094linux_set_current(curthread);1095pdev = device_get_softc(dev);1096if (pdev->pdrv->bsd_iov_uninit != NULL)1097pdev->pdrv->bsd_iov_uninit(dev);1098}10991100static int1101linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)1102{1103struct pci_dev *pdev;1104int error;11051106linux_set_current(curthread);1107pdev = device_get_softc(dev);1108if (pdev->pdrv->bsd_iov_add_vf != NULL)1109error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);1110else1111error = EINVAL;1112return (error);1113}11141115static int1116_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)1117{1118int error;11191120linux_set_current(curthread);1121spin_lock(&pci_lock);1122list_add(&pdrv->node, &pci_drivers);1123spin_unlock(&pci_lock);1124if (pdrv->bsddriver.name == NULL)1125pdrv->bsddriver.name = pdrv->name;1126pdrv->bsddriver.methods = pci_methods;1127pdrv->bsddriver.size = sizeof(struct pci_dev);11281129bus_topo_lock();1130error = devclass_add_driver(dc, &pdrv->bsddriver,1131BUS_PASS_DEFAULT, &pdrv->bsdclass);1132bus_topo_unlock();1133return (-error);1134}11351136int1137linux_pci_register_driver(struct pci_driver 
*pdrv)1138{1139devclass_t dc;11401141pdrv->isdrm = strcmp(pdrv->name, "drmn") == 0;1142dc = pdrv->isdrm ? devclass_create("vgapci") : devclass_find("pci");1143if (dc == NULL)1144return (-ENXIO);1145return (_linux_pci_register_driver(pdrv, dc));1146}11471148static struct resource_list_entry *1149lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)1150{1151int type;11521153type = pci_resource_type(pdev, bar);1154if (type < 0)1155return (NULL);1156bar = PCIR_BAR(bar);1157return (linux_pci_get_rle(pdev, type, bar, reserve));1158}11591160struct device *1161lkpi_pci_find_irq_dev(unsigned int irq)1162{1163struct pci_dev *pdev;1164struct device *found;11651166found = NULL;1167spin_lock(&pci_lock);1168list_for_each_entry(pdev, &pci_devices, links) {1169if (irq == pdev->dev.irq ||1170(irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {1171found = &pdev->dev;1172break;1173}1174}1175spin_unlock(&pci_lock);1176return (found);1177}11781179unsigned long1180pci_resource_start(struct pci_dev *pdev, int bar)1181{1182struct resource_list_entry *rle;1183rman_res_t newstart;1184device_t dev;1185int error;11861187if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)1188return (0);1189dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?1190device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;1191error = bus_translate_resource(dev, rle->type, rle->start, &newstart);1192if (error != 0) {1193device_printf(pdev->dev.bsddev,1194"translate of %#jx failed: %d\n",1195(uintmax_t)rle->start, error);1196return (0);1197}1198return (newstart);1199}12001201unsigned long1202pci_resource_len(struct pci_dev *pdev, int bar)1203{1204struct resource_list_entry *rle;12051206if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)1207return (0);1208return (rle->count);1209}12101211static int1212lkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,1213bool managed)1214{1215struct resource *res;1216struct pci_devres *dr;1217struct pci_mmio_region *mmio;1218int rid;1219int 
type;12201221if (!lkpi_pci_bar_id_valid(bar))1222return (-EINVAL);12231224type = pci_resource_type(pdev, bar);1225if (type < 0)1226return (0);12271228rid = PCIR_BAR(bar);1229res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,1230RF_ACTIVE|RF_SHAREABLE);1231if (res == NULL) {1232device_printf(pdev->dev.bsddev, "%s: failed to alloc "1233"bar %d type %d rid %d\n",1234__func__, bar, type, PCIR_BAR(bar));1235return (-EBUSY);1236}12371238/*1239* It seems there is an implicit devres tracking on these if the device1240* is managed (lkpi_pci_devres_find() case); otherwise the resources are1241* not automatically freed on FreeBSD/LinuxKPI though they should be/are1242* expected to be by Linux drivers.1243* Otherwise if we are called from a pcim-function with the managed1244* argument set, we need to track devres independent of pdev->managed.1245*/1246if (managed)1247dr = lkpi_pci_devres_get_alloc(pdev);1248else1249dr = lkpi_pci_devres_find(pdev);1250if (dr != NULL) {1251dr->region_mask |= (1 << bar);1252dr->region_table[bar] = res;1253}12541255/* Even if the device is not managed we need to track it for iomap. 
 */
	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = res;
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (0);
}

/*
 * Linux pci_request_region(): reserve a single BAR, unmanaged.
 * res_name is accepted for API compatibility; it is only passed along
 * and not otherwise used by this implementation.
 */
int
linuxkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return (lkpi_pci_request_region(pdev, bar, res_name, false));
}

/*
 * Linux pci_request_regions(): reserve all standard BARs.  -EBUSY from
 * an individual BAR is treated as non-fatal; any other error releases
 * everything requested so far and is returned to the caller.
 */
int
linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int error;
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		error = pci_request_region(pdev, i, res_name);
		if (error && error != -EBUSY) {
			pci_release_regions(pdev);
			return (error);
		}
	}
	return (0);
}

/*
 * Managed variant: request all BARs with devres tracking so they are
 * released automatically.  Errors other than -EBUSY are logged, all
 * regions are released, and the error is returned.
 */
int
linuxkpi_pcim_request_all_regions(struct pci_dev *pdev, const char *res_name)
{
	int bar, error;

	for (bar = 0; bar <= PCIR_MAX_BAR_0; bar++) {
		error = lkpi_pci_request_region(pdev, bar, res_name, true);
		if (error != 0 && error != -EBUSY) {
			device_printf(pdev->dev.bsddev, "%s: bar %d res_name '%s': "
			    "lkpi_pci_request_region returned %d\n", __func__,
			    bar, res_name, error);
			pci_release_regions(pdev);
			return (error);
		}
	}
	return (0);
}

/*
 * Linux pci_release_region(): drop the reservation for one BAR together
 * with any implicit devres and mmio tracking associated with it.
 */
void
linuxkpi_pci_release_region(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio, *p;

	if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL)
		return;

	/*
	 * As we implicitly track the requests we also need to clear them on
	 * release. 
Do clear before resource release.1318*/1319dr = lkpi_pci_devres_find(pdev);1320if (dr != NULL) {1321KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"1322" region_table res %p != rel->res %p\n", __func__, pdev,1323bar, dr->region_table[bar], rle->res));1324dr->region_table[bar] = NULL;1325dr->region_mask &= ~(1 << bar);1326}13271328TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {1329if (rle->res != (void *)rman_get_bushandle(mmio->res))1330continue;1331TAILQ_REMOVE(&pdev->mmio, mmio, next);1332free(mmio, M_DEVBUF);1333}13341335bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);1336}13371338void1339linuxkpi_pci_release_regions(struct pci_dev *pdev)1340{1341int i;13421343for (i = 0; i <= PCIR_MAX_BAR_0; i++)1344pci_release_region(pdev, i);1345}13461347int1348linux_pci_register_drm_driver(struct pci_driver *pdrv)1349{1350devclass_t dc;13511352dc = devclass_create("vgapci");1353if (dc == NULL)1354return (-ENXIO);1355pdrv->isdrm = true;1356pdrv->name = "drmn";1357return (_linux_pci_register_driver(pdrv, dc));1358}13591360void1361linux_pci_unregister_driver(struct pci_driver *pdrv)1362{1363devclass_t bus;13641365bus = devclass_find(pdrv->isdrm ? 
"vgapci" : "pci");13661367spin_lock(&pci_lock);1368list_del(&pdrv->node);1369spin_unlock(&pci_lock);1370bus_topo_lock();1371if (bus != NULL)1372devclass_delete_driver(bus, &pdrv->bsddriver);1373bus_topo_unlock();1374}13751376void1377linux_pci_unregister_drm_driver(struct pci_driver *pdrv)1378{1379devclass_t bus;13801381bus = devclass_find("vgapci");13821383spin_lock(&pci_lock);1384list_del(&pdrv->node);1385spin_unlock(&pci_lock);1386bus_topo_lock();1387if (bus != NULL)1388devclass_delete_driver(bus, &pdrv->bsddriver);1389bus_topo_unlock();1390}13911392int1393linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,1394int nreq)1395{1396struct resource_list_entry *rle;1397int error;1398int avail;1399int i;14001401avail = pci_msix_count(pdev->dev.bsddev);1402if (avail < nreq) {1403if (avail == 0)1404return -EINVAL;1405return avail;1406}1407avail = nreq;1408if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)1409return error;1410/*1411* Handle case where "pci_alloc_msix()" may allocate less1412* interrupts than available and return with no error:1413*/1414if (avail < nreq) {1415pci_release_msi(pdev->dev.bsddev);1416return avail;1417}1418rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);1419pdev->dev.irq_start = rle->start;1420pdev->dev.irq_end = rle->start + avail;1421for (i = 0; i < nreq; i++)1422entries[i].vector = pdev->dev.irq_start + i;1423pdev->msix_enabled = true;1424return (0);1425}14261427int1428_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)1429{1430struct resource_list_entry *rle;1431int error;1432int nvec;14331434if (maxvec < minvec)1435return (-EINVAL);14361437nvec = pci_msi_count(pdev->dev.bsddev);1438if (nvec < 1 || nvec < minvec)1439return (-ENOSPC);14401441nvec = min(nvec, maxvec);1442if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)1443return error;14441445/* Native PCI might only ever ask for 32 vectors. 
*/1446if (nvec < minvec) {1447pci_release_msi(pdev->dev.bsddev);1448return (-ENOSPC);1449}14501451rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);1452pdev->dev.irq_start = rle->start;1453pdev->dev.irq_end = rle->start + nvec;1454pdev->irq = rle->start;1455pdev->msi_enabled = true;1456return (0);1457}14581459int1460pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,1461unsigned int flags)1462{1463int error;14641465if (flags & PCI_IRQ_MSIX) {1466struct msix_entry *entries;1467int i;14681469entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);1470if (entries == NULL) {1471error = -ENOMEM;1472goto out;1473}1474for (i = 0; i < maxv; ++i)1475entries[i].entry = i;1476error = pci_enable_msix(pdev, entries, maxv);1477out:1478kfree(entries);1479if (error == 0 && pdev->msix_enabled)1480return (pdev->dev.irq_end - pdev->dev.irq_start);1481}1482if (flags & PCI_IRQ_MSI) {1483if (pci_msi_count(pdev->dev.bsddev) < minv)1484return (-ENOSPC);1485error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);1486if (error == 0 && pdev->msi_enabled)1487return (pdev->dev.irq_end - pdev->dev.irq_start);1488}1489if (flags & PCI_IRQ_INTX) {1490if (pdev->irq)1491return (1);1492}14931494return (-EINVAL);1495}14961497struct msi_desc *1498lkpi_pci_msi_desc_alloc(int irq)1499{1500struct device *dev;1501struct pci_dev *pdev;1502struct msi_desc *desc;1503struct pci_devinfo *dinfo;1504struct pcicfg_msi *msi;1505int vec;15061507dev = lkpi_pci_find_irq_dev(irq);1508if (dev == NULL)1509return (NULL);15101511pdev = to_pci_dev(dev);15121513if (pdev->msi_desc == NULL)1514return (NULL);15151516if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)1517return (NULL);15181519vec = pdev->dev.irq_start - irq;15201521if (pdev->msi_desc[vec] != NULL)1522return (pdev->msi_desc[vec]);15231524dinfo = device_get_ivars(dev->bsddev);1525msi = &dinfo->cfg.msi;15261527desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);15281529desc->pci.msi_attrib.is_64 =1530(msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? 
true : false;1531desc->msg.data = msi->msi_data;15321533pdev->msi_desc[vec] = desc;15341535return (desc);1536}15371538bool1539pci_device_is_present(struct pci_dev *pdev)1540{1541device_t dev;15421543dev = pdev->dev.bsddev;15441545return (bus_child_present(dev));1546}15471548CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));15491550struct linux_dma_obj {1551void *vaddr;1552uint64_t dma_addr;1553bus_dmamap_t dmamap;1554bus_dma_tag_t dmat;1555};15561557static uma_zone_t linux_dma_trie_zone;1558static uma_zone_t linux_dma_obj_zone;15591560static void1561linux_dma_init(void *arg)1562{15631564linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",1565pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,1566UMA_ALIGN_PTR, 0);1567linux_dma_obj_zone = uma_zcreate("linux_dma_object",1568sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,1569UMA_ALIGN_PTR, 0);1570lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);1571}1572SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);15731574static void1575linux_dma_uninit(void *arg)1576{15771578counter_u64_free(lkpi_pci_nseg1_fail);1579uma_zdestroy(linux_dma_obj_zone);1580uma_zdestroy(linux_dma_trie_zone);1581}1582SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);15831584static void *1585linux_dma_trie_alloc(struct pctrie *ptree)1586{15871588return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));1589}15901591static void1592linux_dma_trie_free(struct pctrie *ptree, void *node)1593{15941595uma_zfree(linux_dma_trie_zone, node);1596}15971598PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,1599linux_dma_trie_free);16001601#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)1602static dma_addr_t1603linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,1604bus_dma_tag_t dmat)1605{1606struct linux_dma_priv *priv;1607struct linux_dma_obj *obj;1608int error, nseg;1609bus_dma_segment_t seg;16101611priv = dev->dma_priv;16121613/*1614* If the 
resultant mapping will be entirely 1:1 with the1615* physical address, short-circuit the remainder of the1616* bus_dma API. This avoids tracking collisions in the pctrie1617* with the additional benefit of reducing overhead.1618*/1619if (bus_dma_id_mapped(dmat, phys, len))1620return (phys);16211622obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);1623if (obj == NULL) {1624return (0);1625}1626obj->dmat = dmat;16271628DMA_PRIV_LOCK(priv);1629if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {1630DMA_PRIV_UNLOCK(priv);1631uma_zfree(linux_dma_obj_zone, obj);1632return (0);1633}16341635nseg = -1;1636error = _bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,1637BUS_DMA_NOWAIT, &seg, &nseg);1638if (error != 0) {1639bus_dmamap_destroy(obj->dmat, obj->dmamap);1640DMA_PRIV_UNLOCK(priv);1641uma_zfree(linux_dma_obj_zone, obj);1642counter_u64_add(lkpi_pci_nseg1_fail, 1);1643if (linuxkpi_debug) {1644device_printf(dev->bsddev, "%s: _bus_dmamap_load_phys "1645"error %d, phys %#018jx len %zu\n", __func__,1646error, (uintmax_t)phys, len);1647dump_stack();1648}1649return (0);1650}16511652KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));1653obj->dma_addr = seg.ds_addr;16541655error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);1656if (error != 0) {1657bus_dmamap_unload(obj->dmat, obj->dmamap);1658bus_dmamap_destroy(obj->dmat, obj->dmamap);1659DMA_PRIV_UNLOCK(priv);1660uma_zfree(linux_dma_obj_zone, obj);1661return (0);1662}1663DMA_PRIV_UNLOCK(priv);1664return (obj->dma_addr);1665}1666#else1667static dma_addr_t1668linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,1669size_t len __unused, bus_dma_tag_t dmat __unused)1670{1671return (phys);1672}1673#endif16741675dma_addr_t1676lkpi_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len,1677enum dma_data_direction direction, unsigned long attrs)1678{1679struct linux_dma_priv *priv;1680dma_addr_t dma;16811682priv = dev->dma_priv;1683dma = linux_dma_map_phys_common(dev, phys, len, 
priv->dmat);1684if (dma_mapping_error(dev, dma))1685return (dma);16861687if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)1688dma_sync_single_for_device(dev, dma, len, direction);16891690return (dma);1691}16921693/* For backward compat only so we can MFC this. Remove before 15. */1694dma_addr_t1695linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)1696{1697return (lkpi_dma_map_phys(dev, phys, len, DMA_NONE, 0));1698}16991700#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)1701void1702lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len,1703enum dma_data_direction direction, unsigned long attrs)1704{1705struct linux_dma_priv *priv;1706struct linux_dma_obj *obj;17071708priv = dev->dma_priv;17091710if (pctrie_is_empty(&priv->ptree))1711return;17121713DMA_PRIV_LOCK(priv);1714obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);1715if (obj == NULL) {1716DMA_PRIV_UNLOCK(priv);1717return;1718}1719LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);17201721if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)1722goto skip_sync;17231724/* dma_sync_single_for_cpu() unrolled to avoid lock recursicn. */1725switch (direction) {1726case DMA_BIDIRECTIONAL:1727bus_dmamap_sync(obj->dmat, obj->dmamap, BUS_DMASYNC_POSTREAD);1728bus_dmamap_sync(obj->dmat, obj->dmamap, BUS_DMASYNC_PREREAD);1729break;1730case DMA_TO_DEVICE:1731bus_dmamap_sync(obj->dmat, obj->dmamap, BUS_DMASYNC_POSTWRITE);1732break;1733case DMA_FROM_DEVICE:1734bus_dmamap_sync(obj->dmat, obj->dmamap, BUS_DMASYNC_POSTREAD);1735break;1736default:1737break;1738}17391740skip_sync:1741bus_dmamap_unload(obj->dmat, obj->dmamap);1742bus_dmamap_destroy(obj->dmat, obj->dmamap);1743DMA_PRIV_UNLOCK(priv);17441745uma_zfree(linux_dma_obj_zone, obj);1746}1747#else1748void1749lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len,1750enum dma_data_direction direction, unsigned long attrs)1751{1752}1753#endif17541755/* For backward compat only so we can MFC this. Remove before 15. 
*/1756void1757linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)1758{1759lkpi_dma_unmap(dev, dma_addr, len, DMA_NONE, 0);1760}17611762void *1763linux_dma_alloc_coherent(struct device *dev, size_t size,1764dma_addr_t *dma_handle, gfp_t flag)1765{1766struct linux_dma_priv *priv;1767vm_paddr_t high;1768size_t align;1769void *mem;17701771if (dev == NULL || dev->dma_priv == NULL) {1772*dma_handle = 0;1773return (NULL);1774}1775priv = dev->dma_priv;1776if (priv->dma_coherent_mask)1777high = priv->dma_coherent_mask;1778else1779/* Coherent is lower 32bit only by default in Linux. */1780high = BUS_SPACE_MAXADDR_32BIT;1781align = PAGE_SIZE << get_order(size);1782/* Always zero the allocation. */1783flag |= M_ZERO;1784mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,1785align, 0, VM_MEMATTR_DEFAULT);1786if (mem != NULL) {1787*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,1788priv->dmat_coherent);1789if (*dma_handle == 0) {1790kmem_free(mem, size);1791mem = NULL;1792}1793} else {1794*dma_handle = 0;1795}1796return (mem);1797}17981799struct lkpi_devres_dmam_coherent {1800size_t size;1801dma_addr_t *handle;1802void *mem;1803};18041805static void1806lkpi_dmam_free_coherent(struct device *dev, void *p)1807{1808struct lkpi_devres_dmam_coherent *dr;18091810dr = p;1811dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);1812}18131814static int1815lkpi_dmam_coherent_match(struct device *dev, void *dr, void *mp)1816{1817struct lkpi_devres_dmam_coherent *a, *b;18181819a = dr;1820b = mp;18211822if (a->mem != b->mem)1823return (0);1824if (a->size != b->size || a->handle != b->handle)1825dev_WARN(dev, "for mem %p: size %zu != %zu || handle %#jx != %#jx\n",1826a->mem, a->size, b->size,1827(uintmax_t)a->handle, (uintmax_t)b->handle);1828return (1);1829}18301831void1832linuxkpi_dmam_free_coherent(struct device *dev, size_t size,1833void *addr, dma_addr_t dma_handle)1834{1835struct lkpi_devres_dmam_coherent match = {1836.size = size,1837.handle 
= &dma_handle,1838.mem = addr1839};1840int error;18411842error = devres_destroy(dev, lkpi_dmam_free_coherent,1843lkpi_dmam_coherent_match, &match);1844if (error != 0)1845dev_WARN(dev, "devres_destroy returned %d, size %zu addr %p "1846"dma_handle %#jx\n", error, size, addr, (uintmax_t)dma_handle);1847dma_free_coherent(dev, size, addr, dma_handle);1848}18491850void *1851linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,1852gfp_t flag)1853{1854struct lkpi_devres_dmam_coherent *dr;18551856dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,1857sizeof(*dr), GFP_KERNEL | __GFP_ZERO);18581859if (dr == NULL)1860return (NULL);18611862dr->size = size;1863dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);1864dr->handle = dma_handle;1865if (dr->mem == NULL) {1866lkpi_devres_free(dr);1867return (NULL);1868}18691870lkpi_devres_add(dev, dr);1871return (dr->mem);1872}18731874void1875linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,1876bus_dmasync_op_t op)1877{1878struct linux_dma_priv *priv;1879struct linux_dma_obj *obj;18801881priv = dev->dma_priv;18821883if (pctrie_is_empty(&priv->ptree))1884return;18851886DMA_PRIV_LOCK(priv);1887obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);1888if (obj == NULL) {1889DMA_PRIV_UNLOCK(priv);1890return;1891}18921893bus_dmamap_sync(obj->dmat, obj->dmamap, op);1894DMA_PRIV_UNLOCK(priv);1895}18961897int1898linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,1899enum dma_data_direction direction, unsigned long attrs)1900{1901struct linux_dma_priv *priv;1902struct scatterlist *sg;1903int i, nseg;1904bus_dma_segment_t seg;19051906priv = dev->dma_priv;19071908DMA_PRIV_LOCK(priv);19091910/* create common DMA map in the first S/G entry */1911if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {1912DMA_PRIV_UNLOCK(priv);1913return (0);1914}19151916/* load all S/G list entries */1917for_each_sg(sgl, sg, nents, i) {1918nseg = -1;1919if 
(_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,1920sg_phys(sg), sg->length, BUS_DMA_NOWAIT,1921&seg, &nseg) != 0) {1922bus_dmamap_unload(priv->dmat, sgl->dma_map);1923bus_dmamap_destroy(priv->dmat, sgl->dma_map);1924DMA_PRIV_UNLOCK(priv);1925return (0);1926}1927KASSERT(nseg == 0,1928("More than one segment (nseg=%d)", nseg + 1));19291930sg_dma_address(sg) = seg.ds_addr;1931}19321933if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)1934goto skip_sync;19351936switch (direction) {1937case DMA_BIDIRECTIONAL:1938bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);1939break;1940case DMA_TO_DEVICE:1941bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);1942break;1943case DMA_FROM_DEVICE:1944bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);1945break;1946default:1947break;1948}1949skip_sync:19501951DMA_PRIV_UNLOCK(priv);19521953return (nents);1954}19551956void1957linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,1958int nents __unused, enum dma_data_direction direction,1959unsigned long attrs)1960{1961struct linux_dma_priv *priv;19621963priv = dev->dma_priv;19641965DMA_PRIV_LOCK(priv);19661967if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)1968goto skip_sync;19691970switch (direction) {1971case DMA_BIDIRECTIONAL:1972bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);1973bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);1974break;1975case DMA_TO_DEVICE:1976bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);1977break;1978case DMA_FROM_DEVICE:1979bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);1980break;1981default:1982break;1983}1984skip_sync:19851986bus_dmamap_unload(priv->dmat, sgl->dma_map);1987bus_dmamap_destroy(priv->dmat, sgl->dma_map);1988DMA_PRIV_UNLOCK(priv);1989}19901991struct dma_pool {1992struct device *pool_device;1993uma_zone_t pool_zone;1994struct mtx pool_lock;1995bus_dma_tag_t pool_dmat;1996size_t pool_entry_size;1997struct pctrie 
pool_ptree;1998};19992000#define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)2001#define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)20022003static inline int2004dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)2005{2006struct linux_dma_obj *obj = mem;2007struct dma_pool *pool = arg;2008int error, nseg;2009bus_dma_segment_t seg;20102011nseg = -1;2012DMA_POOL_LOCK(pool);2013error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,2014vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,2015&seg, &nseg);2016DMA_POOL_UNLOCK(pool);2017if (error != 0) {2018return (error);2019}2020KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));2021obj->dma_addr = seg.ds_addr;20222023return (0);2024}20252026static void2027dma_pool_obj_dtor(void *mem, int size, void *arg)2028{2029struct linux_dma_obj *obj = mem;2030struct dma_pool *pool = arg;20312032DMA_POOL_LOCK(pool);2033bus_dmamap_unload(pool->pool_dmat, obj->dmamap);2034DMA_POOL_UNLOCK(pool);2035}20362037static int2038dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,2039int flags)2040{2041struct dma_pool *pool = arg;2042struct linux_dma_obj *obj;2043int error, i;20442045for (i = 0; i < count; i++) {2046obj = uma_zalloc(linux_dma_obj_zone, flags);2047if (obj == NULL)2048break;20492050error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,2051BUS_DMA_NOWAIT, &obj->dmamap);2052if (error!= 0) {2053uma_zfree(linux_dma_obj_zone, obj);2054break;2055}20562057store[i] = obj;2058}20592060return (i);2061}20622063static void2064dma_pool_obj_release(void *arg, void **store, int count)2065{2066struct dma_pool *pool = arg;2067struct linux_dma_obj *obj;2068int i;20692070for (i = 0; i < count; i++) {2071obj = store[i];2072bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);2073uma_zfree(linux_dma_obj_zone, obj);2074}2075}20762077struct dma_pool *2078linux_dma_pool_create(char *name, struct device *dev, size_t size,2079size_t align, size_t boundary)2080{2081struct 
linux_dma_priv *priv;2082struct dma_pool *pool;20832084priv = dev->dma_priv;20852086pool = kzalloc(sizeof(*pool), GFP_KERNEL);2087pool->pool_device = dev;2088pool->pool_entry_size = size;20892090if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),2091align, boundary, /* alignment, boundary */2092priv->dma_mask, /* lowaddr */2093BUS_SPACE_MAXADDR, /* highaddr */2094NULL, NULL, /* filtfunc, filtfuncarg */2095size, /* maxsize */20961, /* nsegments */2097size, /* maxsegsz */20980, /* flags */2099NULL, NULL, /* lockfunc, lockfuncarg */2100&pool->pool_dmat)) {2101kfree(pool);2102return (NULL);2103}21042105pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,2106dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,2107dma_pool_obj_release, pool, 0);21082109mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);2110pctrie_init(&pool->pool_ptree);21112112return (pool);2113}21142115void2116linux_dma_pool_destroy(struct dma_pool *pool)2117{21182119uma_zdestroy(pool->pool_zone);2120bus_dma_tag_destroy(pool->pool_dmat);2121mtx_destroy(&pool->pool_lock);2122kfree(pool);2123}21242125void2126lkpi_dmam_pool_destroy(struct device *dev, void *p)2127{2128struct dma_pool *pool;21292130pool = *(struct dma_pool **)p;2131LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);2132linux_dma_pool_destroy(pool);2133}21342135void *2136linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,2137dma_addr_t *handle)2138{2139struct linux_dma_obj *obj;21402141obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);2142if (obj == NULL)2143return (NULL);21442145DMA_POOL_LOCK(pool);2146if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {2147DMA_POOL_UNLOCK(pool);2148uma_zfree_arg(pool->pool_zone, obj, pool);2149return (NULL);2150}2151DMA_POOL_UNLOCK(pool);21522153*handle = obj->dma_addr;2154return (obj->vaddr);2155}21562157void2158linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)2159{2160struct linux_dma_obj 
*obj;21612162DMA_POOL_LOCK(pool);2163obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);2164if (obj == NULL) {2165DMA_POOL_UNLOCK(pool);2166return;2167}2168LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);2169DMA_POOL_UNLOCK(pool);21702171uma_zfree_arg(pool->pool_zone, obj, pool);2172}21732174static int2175linux_backlight_get_status(device_t dev, struct backlight_props *props)2176{2177struct pci_dev *pdev;21782179linux_set_current(curthread);2180pdev = device_get_softc(dev);21812182props->brightness = pdev->dev.bd->props.brightness;2183props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;2184props->nlevels = 0;21852186return (0);2187}21882189static int2190linux_backlight_get_info(device_t dev, struct backlight_info *info)2191{2192struct pci_dev *pdev;21932194linux_set_current(curthread);2195pdev = device_get_softc(dev);21962197info->type = BACKLIGHT_TYPE_PANEL;2198strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);2199return (0);2200}22012202static int2203linux_backlight_update_status(device_t dev, struct backlight_props *props)2204{2205struct pci_dev *pdev;22062207linux_set_current(curthread);2208pdev = device_get_softc(dev);22092210pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *2211props->brightness / 100;2212pdev->dev.bd->props.power = props->brightness == 0 ?22134/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;2214return (pdev->dev.bd->ops->update_status(pdev->dev.bd));2215}22162217struct backlight_device *2218linux_backlight_device_register(const char *name, struct device *dev,2219void *data, const struct backlight_ops *ops, struct backlight_properties *props)2220{22212222dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);2223dev->bd->ops = ops;2224dev->bd->props.type = props->type;2225dev->bd->props.max_brightness = props->max_brightness;2226dev->bd->props.brightness = props->brightness;2227dev->bd->props.power = props->power;2228dev->bd->data = data;2229dev->bd->dev = 
dev;2230dev->bd->name = strdup(name, M_DEVBUF);22312232dev->backlight_dev = backlight_register(name, dev->bsddev);22332234return (dev->bd);2235}22362237void2238linux_backlight_device_unregister(struct backlight_device *bd)2239{22402241backlight_destroy(bd->dev->backlight_dev);2242free(bd->name, M_DEVBUF);2243free(bd, M_DEVBUF);2244}224522462247