Path: sys/arm/broadcom/bcm2835/bcm2835_dma.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Daisuke Aoyama <[email protected]>
 * Copyright (c) 2013 Oleksandr Tymoshenko <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/rman.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>

#include "bcm2835_dma.h"
#include "bcm2835_vcbus.h"

#define	MAX_REG			9

/* private flags */
#define	BCM_DMA_CH_USED		0x00000001
#define	BCM_DMA_CH_FREE		0x40000000
#define	BCM_DMA_CH_UNMAP	0x80000000

/* Register Map (4.2.1.2) */
#define	BCM_DMA_CS(n)		(0x100*(n) + 0x00)
#define		CS_ACTIVE	(1 << 0)
#define		CS_END		(1 << 1)
#define		CS_INT		(1 << 2)
#define		CS_DREQ		(1 << 3)
#define		CS_ISPAUSED	(1 << 4)
#define		CS_ISHELD	(1 << 5)
#define		CS_ISWAIT	(1 << 6)
#define		CS_ERR		(1 << 8)
#define		CS_WAITWRT	(1 << 28)
#define		CS_DISDBG	(1 << 29)
#define		CS_ABORT	(1 << 30)
#define		CS_RESET	(1U << 31)
#define	BCM_DMA_CBADDR(n)	(0x100*(n) + 0x04)
#define	BCM_DMA_INFO(n)		(0x100*(n) + 0x08)
#define		INFO_INT_EN	(1 << 0)
#define		INFO_TDMODE	(1 << 1)
#define		INFO_WAIT_RESP	(1 << 3)
#define		INFO_D_INC	(1 << 4)
#define		INFO_D_WIDTH	(1 << 5)
#define		INFO_D_DREQ	(1 << 6)
#define		INFO_S_INC	(1 << 8)
#define		INFO_S_WIDTH	(1 << 9)
#define		INFO_S_DREQ	(1 << 10)
#define		INFO_WAITS_SHIFT	(21)
#define		INFO_PERMAP_SHIFT	(16)
#define		INFO_PERMAP_MASK	(0x1f << INFO_PERMAP_SHIFT)

#define	BCM_DMA_SRC(n)		(0x100*(n) + 0x0C)
#define	BCM_DMA_DST(n)		(0x100*(n) + 0x10)
#define	BCM_DMA_LEN(n)		(0x100*(n) + 0x14)
#define	BCM_DMA_STRIDE(n)	(0x100*(n) + 0x18)
#define	BCM_DMA_CBNEXT(n)	(0x100*(n) + 0x1C)
#define	BCM_DMA_DEBUG(n)	(0x100*(n) + 0x20)
#define		DEBUG_ERROR_MASK	(7)

#define	BCM_DMA_INT_STATUS	0xfe0
#define	BCM_DMA_ENABLE		0xff0

/* relative offset from BCM_VC_DMA0_BASE (p.39) */
#define	BCM_DMA_CH(n)		(0x100*(n))

/* channels used by GPU */
#define	BCM_DMA_CH_BULK		0
#define	BCM_DMA_CH_FAST1	2
#define	BCM_DMA_CH_FAST2	3

#define	BCM_DMA_CH_GPU_MASK	((1 << BCM_DMA_CH_BULK) |	\
				 (1 << BCM_DMA_CH_FAST1) |	\
				 (1 << BCM_DMA_CH_FAST2))

/* DMA Control Block - 256bit aligned (p.40) */
struct bcm_dma_cb {
	uint32_t info;		/* Transfer Information */
	uint32_t src;		/* Source Address */
	uint32_t dst;		/* Destination Address */
	uint32_t len;		/* Transfer Length */
	uint32_t stride;	/* 2D Mode Stride */
	uint32_t next;		/* Next Control Block Address */
	uint32_t rsvd1;		/* Reserved */
	uint32_t rsvd2;		/* Reserved */
};

#ifdef DEBUG
static void bcm_dma_cb_dump(struct bcm_dma_cb *cb);
static void bcm_dma_reg_dump(int ch);
#endif

/* DMA channel private info */
struct bcm_dma_ch {
	int			ch;
	uint32_t		flags;
	struct bcm_dma_cb *	cb;
	uint32_t		vc_cb;
	bus_dmamap_t		dma_map;
	void			(*intr_func)(int, void *);
	void *			intr_arg;
};

struct bcm_dma_softc {
	device_t		sc_dev;
	struct mtx		sc_mtx;
	struct resource *	sc_mem;
	struct resource *	sc_irq[BCM_DMA_CH_MAX];
	void *			sc_intrhand[BCM_DMA_CH_MAX];
	struct bcm_dma_ch	sc_dma_ch[BCM_DMA_CH_MAX];
	bus_dma_tag_t		sc_dma_tag;
};

static struct bcm_dma_softc *bcm_dma_sc = NULL;
static uint32_t bcm_dma_channel_mask;

static struct ofw_compat_data compat_data[] = {
	{"broadcom,bcm2835-dma",	1},
	{"brcm,bcm2835-dma",		1},
	{NULL,				0}
};

static void
bcm_dmamap_cb(void *arg, bus_dma_segment_t *segs,
	int nseg, int err)
{
	bus_addr_t *addr;

	if (err)
		return;

	addr = (bus_addr_t *)arg;
	*addr = ARMC_TO_VCBUS(segs[0].ds_addr);
}

static void
bcm_dma_reset(device_t dev, int ch)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	struct bcm_dma_cb *cb;
	uint32_t cs;
	int count;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return;

	cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));

	if (cs & CS_ACTIVE) {
		/* pause current task */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), 0);

		count = 1000;
		do {
			cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));
		} while (!(cs & CS_ISPAUSED) && (count-- > 0));

		if (!(cs & CS_ISPAUSED)) {
			device_printf(dev,
			    "Can't abort DMA transfer at channel %d\n", ch);
		}

		bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

		/* Complete everything, clear interrupt */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch),
		    CS_ABORT | CS_INT | CS_END | CS_ACTIVE);
	}

	/* clear control blocks */
	bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch), 0);
	bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

	/* Reset control block */
	cb = sc->sc_dma_ch[ch].cb;
	bzero(cb, sizeof(*cb));
	cb->info = INFO_WAIT_RESP;
}

static int
bcm_dma_init(device_t dev)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	uint32_t reg;
	struct bcm_dma_ch *ch;
	void *cb_virt;
	vm_paddr_t cb_phys;
	int err;
	int i;

	/*
	 * Only channels set in bcm_dma_channel_mask can be controlled by us.
	 * The others are out of our control as well as the corresponding bits
	 * in both BCM_DMA_ENABLE and BCM_DMA_INT_STATUS global registers.
	 * As these registers are RW ones, there is no safe way to write only
	 * the bits that we control.
	 *
	 * Fortunately, after reset, all channels are enabled in the
	 * BCM_DMA_ENABLE register and all statuses are cleared in the
	 * BCM_DMA_INT_STATUS one.  Not touching these registers is a
	 * trade-off between a correct initialization which does not rely on
	 * anything and not messing up something we have no control over.
	 */
	reg = bus_read_4(sc->sc_mem, BCM_DMA_ENABLE);
	if ((reg & bcm_dma_channel_mask) != bcm_dma_channel_mask)
		device_printf(dev, "channels are not enabled\n");
	reg = bus_read_4(sc->sc_mem, BCM_DMA_INT_STATUS);
	if ((reg & bcm_dma_channel_mask) != 0)
		device_printf(dev, "statuses are not cleared\n");

	/*
	 * Allocate DMA chunks control blocks based on p.40 of the peripheral
	 * spec - control block should be 256-bit (32-byte) aligned.  The DMA
	 * controller has a full 32-bit register dedicated to this address,
	 * so we do not need to bother with the per-SoC peripheral
	 * restrictions.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct bcm_dma_cb), 1,
	    sizeof(struct bcm_dma_cb),
	    BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_dma_tag);

	if (err) {
		device_printf(dev, "failed to allocate DMA tag\n");
		return (err);
	}

	/* setup initial settings */
	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		ch = &sc->sc_dma_ch[i];

		bzero(ch, sizeof(struct bcm_dma_ch));
		ch->ch = i;
		ch->flags = BCM_DMA_CH_UNMAP;

		if ((bcm_dma_channel_mask & (1 << i)) == 0)
			continue;

		err = bus_dmamem_alloc(sc->sc_dma_tag, &cb_virt,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
		    &ch->dma_map);
		if (err) {
			device_printf(dev, "cannot allocate DMA memory\n");
			break;
		}

		/*
		 * Least alignment for busdma-allocated stuff is cache
		 * line size, so just make sure nothing stupid happened
		 * and we got a properly aligned address.
		 */
		if ((uintptr_t)cb_virt & 0x1f) {
			device_printf(dev,
			    "DMA address is not 32-byte aligned: %p\n",
			    (void *)cb_virt);
			break;
		}

		err = bus_dmamap_load(sc->sc_dma_tag, ch->dma_map, cb_virt,
		    sizeof(struct bcm_dma_cb), bcm_dmamap_cb, &cb_phys,
		    BUS_DMA_WAITOK);
		if (err) {
			device_printf(dev, "cannot load DMA memory\n");
			break;
		}

		ch->cb = cb_virt;
		ch->vc_cb = cb_phys;
		ch->flags = BCM_DMA_CH_FREE;
		ch->cb->info = INFO_WAIT_RESP;

		/* reset DMA engine */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(i), CS_RESET);
	}

	return (0);
}

/*
 * Allocate DMA channel for further use, returns channel # or
 *     BCM_DMA_CH_INVALID
 */
int
bcm_dma_allocate(int req_ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	int ch = BCM_DMA_CH_INVALID;
	int i;

	if (sc == NULL)
		return (BCM_DMA_CH_INVALID);

	if (req_ch >= BCM_DMA_CH_MAX)
		return (BCM_DMA_CH_INVALID);

	/* Auto (req_ch < 0) or specified channel */
	mtx_lock(&sc->sc_mtx);

	if (req_ch < 0) {
		for (i = 0; i < BCM_DMA_CH_MAX; i++) {
			if (sc->sc_dma_ch[i].flags & BCM_DMA_CH_FREE) {
				ch = i;
				sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE;
				sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED;
				break;
			}
		}
	} else if (sc->sc_dma_ch[req_ch].flags & BCM_DMA_CH_FREE) {
		ch = req_ch;
		sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE;
		sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED;
	}

	mtx_unlock(&sc->sc_mtx);
	return (ch);
}

/*
 * Frees allocated channel.  Returns 0 on success, -1 otherwise
 */
int
bcm_dma_free(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;

	if (sc == NULL)
		return (-1);

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	mtx_lock(&sc->sc_mtx);
	if (sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED) {
		sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_FREE;
		sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_USED;
		sc->sc_dma_ch[ch].intr_func = NULL;
		sc->sc_dma_ch[ch].intr_arg = NULL;

		/* reset DMA engine */
		bcm_dma_reset(sc->sc_dev, ch);
	}

	mtx_unlock(&sc->sc_mtx);
	return (0);
}

/*
 * Assign handler function for channel interrupt
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_intr(int ch, void (*func)(int, void *), void *arg)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (sc == NULL)
		return (-1);

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	sc->sc_dma_ch[ch].intr_func = func;
	sc->sc_dma_ch[ch].intr_arg = arg;
	cb = sc->sc_dma_ch[ch].cb;
	cb->info |= INFO_INT_EN;

	return (0);
}

/*
 * Setup DMA source parameters
 *     ch - channel number
 *     dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if
 *         source is physical memory
 *     inc_addr - BCM_DMA_INC_ADDR if source address
 *         should be increased after each access or
 *         BCM_DMA_SAME_ADDR if address should remain
 *         the same
 *     width - size of read operation, BCM_DMA_32BIT
 *         for 32bit bursts, BCM_DMA_128BIT for 128 bits
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_src(int ch, int dreq, int inc_addr, int width)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	uint32_t info;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	info = sc->sc_dma_ch[ch].cb->info;
	info &= ~INFO_PERMAP_MASK;
	info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK;

	if (dreq)
		info |= INFO_S_DREQ;
	else
		info &= ~INFO_S_DREQ;

	if (width == BCM_DMA_128BIT)
		info |= INFO_S_WIDTH;
	else
		info &= ~INFO_S_WIDTH;

	if (inc_addr == BCM_DMA_INC_ADDR)
		info |= INFO_S_INC;
	else
		info &= ~INFO_S_INC;

	sc->sc_dma_ch[ch].cb->info = info;

	return (0);
}

/*
 * Setup DMA destination parameters
 *     ch - channel number
 *     dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if
 *         destination is physical memory
 *     inc_addr - BCM_DMA_INC_ADDR if destination address
 *         should be increased after each access or
 *         BCM_DMA_SAME_ADDR if address should remain
 *         the same
 *     width - size of write operation, BCM_DMA_32BIT
 *         for 32bit bursts, BCM_DMA_128BIT for 128 bits
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_dst(int ch, int dreq, int inc_addr, int width)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	uint32_t info;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	info = sc->sc_dma_ch[ch].cb->info;
	info &= ~INFO_PERMAP_MASK;
	info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK;

	if (dreq)
		info |= INFO_D_DREQ;
	else
		info &= ~INFO_D_DREQ;

	if (width == BCM_DMA_128BIT)
		info |= INFO_D_WIDTH;
	else
		info &= ~INFO_D_WIDTH;

	if (inc_addr == BCM_DMA_INC_ADDR)
		info |= INFO_D_INC;
	else
		info &= ~INFO_D_INC;

	sc->sc_dma_ch[ch].cb->info = info;

	return (0);
}

#ifdef DEBUG
void
bcm_dma_cb_dump(struct bcm_dma_cb *cb)
{

	printf("DMA CB ");
	printf("INFO: %8.8x ", cb->info);
	printf("SRC: %8.8x ", cb->src);
	printf("DST: %8.8x ", cb->dst);
	printf("LEN: %8.8x ", cb->len);
	printf("\n");
	printf("STRIDE: %8.8x ", cb->stride);
	printf("NEXT: %8.8x ", cb->next);
	printf("RSVD1: %8.8x ", cb->rsvd1);
	printf("RSVD2: %8.8x ", cb->rsvd2);
	printf("\n");
}

void
bcm_dma_reg_dump(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	int i;
	uint32_t reg;

	if (sc == NULL)
		return;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return;

	printf("DMA%d: ", ch);
	for (i = 0; i < MAX_REG; i++) {
		reg = bus_read_4(sc->sc_mem, BCM_DMA_CH(ch) + i*4);
		printf("%8.8x ", reg);
	}
	printf("\n");
}
#endif

/*
 * Start DMA transaction
 *     ch - channel number
 *     src, dst - source and destination address in
 *         ARM physical memory address space.
 *     len - number of bytes to be transferred
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_start(int ch, vm_paddr_t src, vm_paddr_t dst, int len)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (sc == NULL)
		return (-1);

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	cb = sc->sc_dma_ch[ch].cb;
	cb->src = ARMC_TO_VCBUS(src);
	cb->dst = ARMC_TO_VCBUS(dst);

	cb->len = len;

	bus_dmamap_sync(sc->sc_dma_tag,
	    sc->sc_dma_ch[ch].dma_map, BUS_DMASYNC_PREWRITE);

	bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch),
	    sc->sc_dma_ch[ch].vc_cb);
	bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), CS_ACTIVE);

#ifdef DEBUG
	bcm_dma_cb_dump(sc->sc_dma_ch[ch].cb);
	bcm_dma_reg_dump(ch);
#endif

	return (0);
}

/*
 * Get length requested for DMA transaction
 *     ch - channel number
 *
 * Returns size of transaction, 0 if channel is invalid
 */
uint32_t
bcm_dma_length(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (sc == NULL)
		return (0);

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (0);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (0);

	cb = sc->sc_dma_ch[ch].cb;

	return (cb->len);
}

static void
bcm_dma_intr(void *arg)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_ch *ch = (struct bcm_dma_ch *)arg;
	uint32_t cs, debug;

	/* my interrupt? */
	cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch->ch));

	/*
	 * Is it an active channel?  Our diagnostics could be better here, but
	 * it's not necessarily an easy task to resolve a rid/resource to an
	 * actual irq number.  We'd want to do this to set a flag indicating
	 * whether the irq is shared or not, so we know to complain.
	 */
	if (!(ch->flags & BCM_DMA_CH_USED))
		return;

	/* Again, we can't complain here.  The same logic applies. */
	if (!(cs & (CS_INT | CS_ERR)))
		return;

	if (cs & CS_ERR) {
		debug = bus_read_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch));
		device_printf(sc->sc_dev, "DMA error %d on CH%d\n",
		    debug & DEBUG_ERROR_MASK, ch->ch);
		bus_write_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch),
		    debug & DEBUG_ERROR_MASK);
		bcm_dma_reset(sc->sc_dev, ch->ch);
	}

	if (cs & CS_INT) {
		/* acknowledge interrupt */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch->ch),
		    CS_INT | CS_END);

		/* Prepare for possible access to len field */
		bus_dmamap_sync(sc->sc_dma_tag, ch->dma_map,
		    BUS_DMASYNC_POSTWRITE);

		/* invoke the saved callback function with its argument */
		if (ch->intr_func)
			ch->intr_func(ch->ch, ch->intr_arg);
	}
}

static int
bcm_dma_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "BCM2835 DMA Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
bcm_dma_attach(device_t dev)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	phandle_t node;
	int rid, err = 0;
	int i;

	sc->sc_dev = dev;

	if (bcm_dma_sc)
		return (ENXIO);

	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		sc->sc_irq[i] = NULL;
		sc->sc_intrhand[i] = NULL;
	}

	/* Get DMA channel mask. */
	node = ofw_bus_get_node(sc->sc_dev);
	if (OF_getencprop(node, "brcm,dma-channel-mask", &bcm_dma_channel_mask,
	    sizeof(bcm_dma_channel_mask)) == -1 &&
	    OF_getencprop(node, "broadcom,channels", &bcm_dma_channel_mask,
	    sizeof(bcm_dma_channel_mask)) == -1) {
		device_printf(dev, "could not get channel mask property\n");
		return (ENXIO);
	}

	/* Mask out channels used by GPU. */
	bcm_dma_channel_mask &= ~BCM_DMA_CH_GPU_MASK;

	/* DMA0 - DMA14 */
	rid = 0;
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	/* IRQ DMA0 - DMA11; XXX do not use DMA12 (spurious?) */
	for (rid = 0; rid < BCM_DMA_CH_MAX; rid++) {
		if ((bcm_dma_channel_mask & (1 << rid)) == 0)
			continue;

		sc->sc_irq[rid] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE | RF_SHAREABLE);
		if (sc->sc_irq[rid] == NULL) {
			device_printf(dev, "cannot allocate interrupt\n");
			err = ENXIO;
			goto fail;
		}
		if (bus_setup_intr(dev, sc->sc_irq[rid],
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL, bcm_dma_intr,
		    &sc->sc_dma_ch[rid], &sc->sc_intrhand[rid])) {
			device_printf(dev, "cannot setup interrupt handler\n");
			err = ENXIO;
			goto fail;
		}
	}

	mtx_init(&sc->sc_mtx, "bcmdma", "bcmdma", MTX_DEF);
	bcm_dma_sc = sc;

	err = bcm_dma_init(dev);
	if (err)
		goto fail;

	return (err);

fail:
	if (sc->sc_mem)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem);

	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		if (sc->sc_intrhand[i])
			bus_teardown_intr(dev, sc->sc_irq[i],
			    sc->sc_intrhand[i]);
		if (sc->sc_irq[i])
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->sc_irq[i]);
	}

	return (err);
}

static device_method_t bcm_dma_methods[] = {
	DEVMETHOD(device_probe,		bcm_dma_probe),
	DEVMETHOD(device_attach,	bcm_dma_attach),
	{ 0, 0 }
};

static driver_t bcm_dma_driver = {
	"bcm_dma",
	bcm_dma_methods,
	sizeof(struct bcm_dma_softc),
};

EARLY_DRIVER_MODULE(bcm_dma, simplebus, bcm_dma_driver, 0, 0,
    BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE);
MODULE_VERSION(bcm_dma, 1);
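
For context, the following is a minimal sketch (not part of the driver) of how a client driver might consume this API: allocate a channel, register a completion callback, describe the source and destination, start a transfer, and release the channel on failure. The example_* names, the DREQ number, and the FIFO address are hypothetical placeholders; the BCM_DMA_DREQ_NONE, BCM_DMA_INC_ADDR, BCM_DMA_SAME_ADDR, BCM_DMA_32BIT, and BCM_DMA_CH_INVALID constants are assumed to be declared in bcm2835_dma.h, as referenced in the comments above.

/*
 * Hypothetical consumer of the bcm_dma API (illustrative sketch only).
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>

#include "bcm2835_dma.h"

static int example_ch = BCM_DMA_CH_INVALID;
static volatile int example_done;

/* Completion callback; runs from bcm_dma_intr() when CS_INT fires. */
static void
example_dma_done(int ch, void *arg)
{

	example_done = 1;
	/* A real driver would wakeup()/cv_signal() a sleeping thread here. */
}

/* Start a memory-to-peripheral transfer of len bytes. */
static int
example_start_tx(vm_paddr_t buf_pa, vm_paddr_t fifo_pa, int len, int dreq)
{

	/* req_ch < 0 asks bcm_dma_allocate() for any free channel. */
	example_ch = bcm_dma_allocate(-1);
	if (example_ch == BCM_DMA_CH_INVALID)
		return (ENXIO);

	if (bcm_dma_setup_intr(example_ch, example_dma_done, NULL) != 0)
		goto fail;

	/* Source: system memory, incrementing address, 32-bit accesses. */
	if (bcm_dma_setup_src(example_ch, BCM_DMA_DREQ_NONE,
	    BCM_DMA_INC_ADDR, BCM_DMA_32BIT) != 0)
		goto fail;

	/* Destination: peripheral FIFO at a fixed address, paced by DREQ. */
	if (bcm_dma_setup_dst(example_ch, dreq,
	    BCM_DMA_SAME_ADDR, BCM_DMA_32BIT) != 0)
		goto fail;

	/* Addresses are ARM physical; bcm_dma_start() converts to VC bus. */
	example_done = 0;
	if (bcm_dma_start(example_ch, buf_pa, fifo_pa, len) != 0)
		goto fail;

	return (0);

fail:
	bcm_dma_free(example_ch);
	example_ch = BCM_DMA_CH_INVALID;
	return (EIO);
}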