Path: drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c

// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <[email protected]>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus width up to 512 bits (for both AXI master interfaces), but
 * it depends on IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS		  \
	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
	DMA_SLAVE_BUSWIDTH_64_BYTES)

#define AXI_DMA_FLAG_HAS_APB_REGS	BIT(0)
#define AXI_DMA_FLAG_HAS_RESETS		BIT(1)
#define AXI_DMA_FLAG_USE_CFG2		BIT(2)

static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}

static inline void
axi_dma_iowrite64(struct axi_dma_chip *chip, u32 reg, u64 val)
{
	iowrite64(val, chip->regs + reg);
}

static inline u64 axi_dma_ioread64(struct axi_dma_chip *chip, u32 reg)
{
	return ioread64(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * Split one 64-bit write into two 32-bit writes, as some hardware
	 * doesn't support 64-bit access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}

static inline void axi_chan_config_write(struct axi_dma_chan *chan,
					 struct axi_dma_chan_config *config)
{
	u32 cfg_lo, cfg_hi;

	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	if (chan->chip->dw->hdata->reg_map_8_channels &&
	    !chan->chip->dw->hdata->use_cfg2) {
		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
			 config->src_per << CH_CFG_H_SRC_PER_POS |
			 config->dst_per << CH_CFG_H_DST_PER_POS |
			 config->prior << CH_CFG_H_PRIORITY_POS;
	} else {
		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
			  config->dst_per << CH_CFG2_L_DST_PER_POS;
		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
			 config->prior << CH_CFG2_H_PRIORITY_POS;
	}
	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
}
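
/*
 * Illustration of the two layouts handled above (hypothetical values):
 * on the legacy 8-channel register map a DEV_TO_MEM transfer using
 * handshake interface 5 would carry SRC_PER = 5, the handshake selects
 * and the priority all in CH_CFG_H, while the CFG2 layout moves the
 * SRC_PER/DST_PER fields down into CH_CFG_L and keeps TT_FC, the
 * handshake selects and the priority in CH_CFG_H.
 */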

static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}

static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
		if (chan->id >= DMAC_CHAN_16) {
			val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
			val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
		if (chan->chip->dw->hdata->reg_map_8_channels)
			val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
		else
			val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
	}
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
		if (chan->id >= DMAC_CHAN_16) {
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
				(u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
	}
}
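
/*
 * Note on the CHEN programming above: each enable bit is paired with a
 * write-enable bit, so the controller only latches an enable bit whose
 * matching _WE bit is set in the same write. For IPs configured with
 * more than 16 channels the enable bits for channels 16 and up live in
 * the upper half of the 64-bit register (hence DMAC_CHAN_BLOCK_SHIFT
 * and the BIT(chan->id) >> DMAC_CHAN_16 adjustment).
 */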

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16)
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
	else
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	if (chan->id >= DMAC_CHAN_16)
		return !!(val & ((u64)(BIT(chan->id) >> DMAC_CHAN_16) << DMAC_CHAN_BLOCK_SHIFT));
	else
		return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}

static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
	int ret;
	u32 i;

	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(chip->dev, "Unable to set coherent mask\n");
}

static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

	return __ffs(src | dst | len | BIT(max_width));
}

static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}

static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
	struct axi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
		return NULL;
	}
	desc->nr_hw_descs = num;

	return desc;
}

static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	*addr = phys;

	return lli;
}

static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	int count = desc->nr_hw_descs;
	struct axi_dma_hw_desc *hw_desc;
	int descs_put;

	for (descs_put = 0; descs_put < count; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		axi_chan_name(chan), descs_put,
		atomic_read(&chan->descs_allocated));
}

static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}

static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	u32 completed_length;
	unsigned long flags;
	u32 completed_blocks;
	size_t bytes = 0;
	u32 length;
	u32 len;

	status = dma_cookie_status(dchan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vdesc = vchan_find_desc(&chan->vc, cookie);
	if (vdesc) {
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
		completed_length = completed_blocks * len;
		bytes = length - completed_length;
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);

	return status;
}
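
/*
 * The residue reported above is an estimate: completed_blocks * len
 * assumes all hw_descs of a descriptor carry the same segment length
 * (the prep routines below split each period or sg entry into
 * equal-sized segments), and only fully completed blocks are counted.
 */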

static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}

static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset = DMAC_APB_BYTE_WR_CH_EN;
	u32 reg_width, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	reg_width = __ffs(chan->config.dst_addr_width);
	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
		offset = DMAC_APB_HALFWORD_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);

	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);

	iowrite32(val, chan->chip->apb_regs + offset);
}

/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	struct axi_dma_chan_config config = {};
	u32 irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	axi_dma_enable(chan->chip);

	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
	config.prior = priority;
	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
	config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
		if (chan->chip->apb_regs)
			config.dst_per = chan->id;
		else
			config.dst_per = chan->hw_handshake_num;
		break;
	case DMA_DEV_TO_MEM:
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
		if (chan->chip->apb_regs)
			config.src_per = chan->id;
		else
			config.src_per = chan->hw_handshake_num;
		break;
	default:
		break;
	}
	axi_chan_config_write(chan, &config);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}
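
/*
 * The programming order above follows the usual bring-up sequence for
 * this controller: global DMAC enable, per-channel CFG, head-of-list
 * LLP (the low bits select the AXI master used for LLI fetches), then
 * interrupt unmasking, and the channel enable last, so no interrupt
 * can fire for a half-programmed channel.
 */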

static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}

static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_axi_dma_synchronize(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	vchan_synchronize(&chan->vc);
}

static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan)) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));
		return -EBUSY;
	}

	/* LLI address must be aligned to a 64-byte boundary */
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan))
		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
			axi_chan_name(chan));

	axi_chan_disable(chan);
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptor still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

	pm_runtime_put(chan->chip->dev);
}

static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
	struct axi_dma_chip *chip = chan->chip;
	unsigned long reg_value, val;

	if (!chip->apb_regs) {
		dev_err(chip->dev, "apb_regs not initialized\n");
		return;
	}

	/*
	 * An unused DMA channel has a default value of 0x3F.
	 * Lock the DMA channel by assigning a handshake number to it.
	 * Unlock the DMA channel by assigning 0x3F to it.
	 */
	if (set)
		val = chan->hw_handshake_num;
	else
		val = UNUSED_CHANNEL;

	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	/* Channel is already allocated, set handshake as per channel ID */
	/* One 64-bit write covers the handshake select fields of all 8 channels */

	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	return;
}

/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI as 1, it understands that the current block is the final block in the
 * transfer and completes the DMA transfer operation at the end of the
 * current block transfer.
 */
static void set_desc_last(struct axi_dma_hw_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli->ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli->ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
	u32 val;

	/* Select AXI0 for source master */
	val = le32_to_cpu(desc->lli->ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli->ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
				 struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for dest master if available */
	val = le32_to_cpu(hw_desc->lli->ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	hw_desc->lli->ctl_lo = cpu_to_le32(val);
}

static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
				  struct axi_dma_hw_desc *hw_desc,
				  dma_addr_t mem_addr, size_t len)
{
	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
	unsigned int reg_width;
	unsigned int mem_width;
	dma_addr_t device_addr;
	size_t axi_block_ts;
	size_t block_ts;
	u32 ctllo, ctlhi;
	u32 burst_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	mem_width = __ffs(data_width | mem_addr | len);
	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
		mem_width = DWAXIDMAC_TRANS_WIDTH_32;

	if (!IS_ALIGNED(mem_addr, 4)) {
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
		return -EINVAL;
	}

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(chan->config.dst_addr_width);
		device_addr = chan->config.dst_addr;
		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
			mem_width << CH_CTL_L_SRC_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		device_addr = chan->config.src_addr;
		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
			mem_width << CH_CTL_L_DST_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> reg_width;
		break;
	default:
		return -EINVAL;
	}

	if (block_ts > axi_block_ts)
		return -EINVAL;

	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
	if (unlikely(!hw_desc->lli))
		return -ENOMEM;

	ctlhi = CH_CTL_H_LLI_VALID;

	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
			 burst_len << CH_CTL_H_ARLEN_POS |
			 burst_len << CH_CTL_H_AWLEN_POS;
	}

	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);

	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}

	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);

	set_desc_src_master(hw_desc);

	hw_desc->len = len;
	return 0;
}

static size_t calculate_block_len(struct axi_dma_chan *chan,
				  dma_addr_t dma_addr, size_t buf_len,
				  enum dma_transfer_direction direction)
{
	u32 data_width, reg_width, mem_width;
	size_t axi_block_ts, block_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	switch (direction) {
	case DMA_MEM_TO_DEV:
		data_width = BIT(chan->chip->dw->hdata->m_data_width);
		mem_width = __ffs(data_width | dma_addr | buf_len);
		if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
			mem_width = DWAXIDMAC_TRANS_WIDTH_32;

		block_len = axi_block_ts << mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		block_len = axi_block_ts << reg_width;
		break;
	default:
		block_len = 0;
	}

	return block_len;
}
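
/*
 * Worked example with hypothetical numbers: with block_size[chan->id] =
 * 1024 transfers and a 32-bit device register (reg_width = 2), one
 * hardware block moves at most 1024 << 2 = 4096 bytes. The prep routines
 * below would then split a 10000-byte DEV_TO_MEM period into
 * DIV_ROUND_UP(10000, 4096) = 3 segments of DIV_ROUND_UP(10000, 3) =
 * 3334 bytes each.
 */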

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
			    size_t buf_len, size_t period_len,
			    enum dma_transfer_direction direction,
			    unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	dma_addr_t src_addr = dma_addr;
	u32 num_periods, num_segments;
	size_t axi_block_len;
	u32 total_segments;
	u32 segment_len;
	unsigned int i;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	num_periods = buf_len / period_len;

	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
	if (axi_block_len == 0)
		return NULL;

	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
	segment_len = DIV_ROUND_UP(period_len, num_segments);

	total_segments = num_periods * num_segments;

	desc = axi_desc_alloc(total_segments);
	if (unlikely(!desc))
		goto err_desc_get;

	chan->direction = direction;
	desc->chan = chan;
	chan->cyclic = true;
	desc->length = 0;
	desc->period_len = period_len;

	for (i = 0; i < total_segments; i++) {
		hw_desc = &desc->hw_desc[i];

		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
		if (status < 0)
			goto err_desc_get;

		desc->length += hw_desc->len;
		/*
		 * Set end-of-link on every linked descriptor, so that the
		 * cyclic callback can be triggered from the interrupt.
		 */
		set_desc_last(hw_desc);

		src_addr += segment_len;
	}

	llp = desc->hw_desc[0].llp;

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_transfer_direction direction,
			      unsigned long flags, void *context)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 num_segments, segment_len;
	unsigned int loop = 0;
	struct scatterlist *sg;
	size_t axi_block_len;
	u32 len, num_sgs = 0;
	unsigned int i;
	dma_addr_t mem;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	mem = sg_dma_address(sgl);
	len = sg_dma_len(sgl);

	axi_block_len = calculate_block_len(chan, mem, len, direction);
	if (axi_block_len == 0)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);

	desc = axi_desc_alloc(num_sgs);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	desc->length = 0;
	chan->direction = direction;

	for_each_sg(sgl, sg, sg_len, i) {
		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);
		num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
		segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);

		do {
			hw_desc = &desc->hw_desc[loop++];
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
			if (status < 0)
				goto err_desc_get;

			desc->length += hw_desc->len;
			len -= segment_len;
			mem += segment_len;
		} while (len >= segment_len);
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num_sgs - 1]);

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num_sgs];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num_sgs);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
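
/*
 * Sketch of how a dmaengine client might drive the slave paths above
 * (illustrative only; the channel name, device address and buffer are
 * made up):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */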

static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 xfer_width, reg, num;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
	desc = axi_desc_alloc(num);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	num = 0;
	desc->length = 0;
	while (len) {
		xfer_len = len;

		hw_desc = &desc->hw_desc[num];
		/*
		 * Take care of the alignment.
		 * Actually source and destination widths can be different, but
		 * make them the same to keep things simpler.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts indicates the total number of data units of the
		 * given width to be transferred in a DMA block transfer.
		 * The BLOCK_TS register should be set to block_ts - 1.
		 */
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}

		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
		if (unlikely(!hw_desc->lli))
			goto err_desc_get;

		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		hw_desc->lli->ctl_hi = cpu_to_le32(reg);

		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		hw_desc->lli->ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(hw_desc);
		set_desc_dest_master(hw_desc, desc);

		hw_desc->len = xfer_len;
		desc->length += hw_desc->len;
		/* update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
		num++;
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num - 1]);
	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);
	return NULL;
}

static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
					struct dma_slave_config *config)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));

	return 0;
}

static void axi_chan_dump_lli(struct axi_dma_chan *chan,
			      struct axi_dma_hw_desc *desc)
{
	if (!desc->lli) {
		dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
		return;
	}

	dev_err(dchan2dev(&chan->vc.chan),
		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
		le64_to_cpu(desc->lli->sar),
		le64_to_cpu(desc->lli->dar),
		le64_to_cpu(desc->lli->llp),
		le32_to_cpu(desc->lli->block_ts_lo),
		le32_to_cpu(desc->lli->ctl_hi),
		le32_to_cpu(desc->lli->ctl_lo));
}

static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
				   struct axi_dma_desc *desc_head)
{
	int count = atomic_read(&chan->descs_allocated);
	int i;

	for (i = 0; i < count; i++)
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
}

static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}
	/* Remove the completed descriptor from issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	u64 llp;
	int i;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}

	if (chan->cyclic) {
		desc = vd_to_axi_desc(vd);
		if (desc) {
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
			for (i = 0; i < count; i++) {
				hw_desc = &desc->hw_desc[i];
				if (hw_desc->llp == llp) {
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
					hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
					desc->completed_blocks = i;

					if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
						vchan_cyclic_callback(vd);
					break;
				}
			}

			axi_chan_enable(chan);
		}
	} else {
		/* Remove the completed descriptor from issued list before completing */
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
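
/*
 * Cyclic completion above locates the hardware's position by matching
 * the channel's current CH_LLP against the LLIs of the ring, re-marks
 * the matched LLI as valid so the ring can be traversed again, and only
 * fires the period callback when an integral number of periods has
 * completed (which assumes the equal-sized segments produced by
 * dw_axi_dma_chan_prep_cyclic()).
 */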

static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
	struct axi_dma_chip *chip = dev_id;
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan;

	u32 status, i;

	/* Disable DMAC interrupts. We'll enable them after processing channels */
	axi_dma_irq_disable(chip);

	/* Poll, clear and process every channel interrupt status */
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		chan = &dw->chan[i];
		status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);

		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
			axi_chan_name(chan), i, status);

		if (status & DWAXIDMAC_IRQ_ALL_ERR)
			axi_chan_handle_err(chan, status);
		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
			axi_chan_block_xfer_complete(chan);
	}

	/* Re-enable interrupts */
	axi_dma_irq_enable(chip);

	return IRQ_HANDLED;
}

static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
	unsigned long flags;
	u32 val;
	int ret;
	LIST_HEAD(head);

	axi_chan_disable(chan);

	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
					!(val & chan_active), 1000, 50000);
	if (ret == -ETIMEDOUT)
		dev_warn(dchan2dev(dchan),
			 "%s failed to stop\n", axi_chan_name(chan));

	if (chan->direction != DMA_MEM_TO_MEM)
		dw_axi_dma_set_hw_channel(chan, false);
	if (chan->direction == DMA_MEM_TO_DEV)
		dw_axi_dma_set_byte_halfword(chan, false);

	spin_lock_irqsave(&chan->vc.lock, flags);

	vchan_get_all_descriptors(&chan->vc, &head);

	chan->cyclic = false;
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}

static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations */
	u64 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
		if (chan->id >= DMAC_CHAN_16) {
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
				(u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
			val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
			val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
		}
	}

	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}
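
/*
 * Which register carries the suspend request above depends on the
 * register map: the legacy 8-channel map uses the SUSP/SUSP_WE pairs in
 * DMAC_CHEN, newer maps use the SUSP2 pairs in the dedicated
 * DMAC_CHSUSPREG. The poll loop gives the channel roughly 20 * 2us to
 * report DWAXIDMAC_IRQ_SUSPENDED before returning -EAGAIN.
 */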

/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
		if (chan->id >= DMAC_CHAN_16) {
			val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
			val |= ((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
		} else {
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
		}
	}

	chan->is_paused = false;
}

static int dma_chan_resume(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->is_paused)
		axi_chan_resume(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}

static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}

static int axi_dma_resume(struct axi_dma_chip *chip)
{
	int ret;

	ret = clk_prepare_enable(chip->cfgr_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(chip->core_clk);
	if (ret < 0)
		return ret;

	axi_dma_enable(chip);
	axi_dma_irq_enable(chip);

	return 0;
}

static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_resume(chip);
}

static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct dw_axi_dma *dw = ofdma->of_dma_data;
	struct axi_dma_chan *chan;
	struct dma_chan *dchan;

	dchan = dma_get_any_slave_channel(&dw->dma);
	if (!dchan)
		return NULL;

	chan = dchan_to_axi_dma_chan(dchan);
	chan->hw_handshake_num = dma_spec->args[0];
	return dchan;
}

static int parse_device_properties(struct axi_dma_chip *chip)
{
	struct device *dev = chip->dev;
	u32 tmp, carr[DMAC_MAX_CHANNELS];
	int ret;

	ret = device_property_read_u32(dev, "dma-channels", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
		return -EINVAL;

	chip->dw->hdata->nr_channels = tmp;
	if (tmp <= DMA_REG_MAP_CH_REF)
		chip->dw->hdata->reg_map_8_channels = true;

	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
		return -EINVAL;

	chip->dw->hdata->nr_masters = tmp;
	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
	if (ret)
		return ret;
	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
		return -EINVAL;

	chip->dw->hdata->m_data_width = tmp;

	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
			return -EINVAL;

		chip->dw->hdata->block_size[tmp] = carr[tmp];
	}

	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	/* Priority value must be programmed within [0:nr_channels-1] range */
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] >= chip->dw->hdata->nr_channels)
			return -EINVAL;

		chip->dw->hdata->priority[tmp] = carr[tmp];
	}

	/* axi-max-burst-len is an optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
	if (!ret) {
		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
			return -EINVAL;
		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
			return -EINVAL;

		chip->dw->hdata->restrict_axi_burst_len = true;
		chip->dw->hdata->axi_rw_burst_len = tmp;
	}

	return 0;
}
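
/*
 * Illustrative (not authoritative) device tree fragment matching what
 * parse_device_properties() consumes, here for a 4-channel instance:
 *
 *	dma-channels = <4>;
 *	snps,dma-masters = <2>;
 *	snps,data-width = <3>;		// encoded: 8 << 3 = 64-bit bus
 *	snps,block-size = <4096 4096 4096 4096>;
 *	snps,priority = <0 1 2 3>;
 *	snps,axi-max-burst-len = <16>;	// optional
 */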

static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip)
{
	int irq_count = platform_irq_count(pdev);
	int ret;

	for (int i = 0; i < irq_count; i++) {
		chip->irq[i] = platform_get_irq(pdev, i);
		if (chip->irq[i] < 0)
			return chip->irq[i];
		ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt,
				       IRQF_SHARED, KBUILD_MODNAME, chip);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int dw_probe(struct platform_device *pdev)
{
	struct axi_dma_chip *chip;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	struct reset_control *resets;
	unsigned int flags;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
	if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(chip->apb_regs))
			return PTR_ERR(chip->apb_regs);
	}

	if (flags & AXI_DMA_FLAG_HAS_RESETS) {
		resets = devm_reset_control_array_get_exclusive(&pdev->dev);
		if (IS_ERR(resets))
			return PTR_ERR(resets);

		ret = reset_control_deassert(resets);
		if (ret)
			return ret;
	}

	chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	ret = axi_req_irqs(pdev, chip);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}

	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.max_burst = hdata->axi_rw_burst_len;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
	dw->dma.device_synchronize = dw_axi_dma_synchronize;
	dw->dma.device_config = dw_axi_dma_chan_slave_config;
	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;

	/*
	 * The Synopsys DesignWare AXI DMA datasheet states that the maximum
	 * supported block size is 1024 transfers. The device register width
	 * is 4 bytes, therefore set the constraint to 1024 * 4.
	 */
	dw->dma.dev->dma_parms = &dw->dma_parms;
	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because we need the
	 * driver to work even without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	/* Register with OF helpers for DMA lookups */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 dw_axi_dma_of_xlate, dw);
	if (ret < 0)
		dev_warn(&pdev->dev,
			 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
	pm_runtime_disable(chip->dev);

	return ret;
}
i++)1643if (chip->irq[i] > 0)1644devm_free_irq(chip->dev, chip->irq[i], chip);16451646of_dma_controller_free(chip->dev->of_node);16471648list_for_each_entry_safe(chan, _chan, &dw->dma.channels,1649vc.chan.device_node) {1650list_del(&chan->vc.chan.device_node);1651tasklet_kill(&chan->vc.task);1652}1653}16541655static const struct dev_pm_ops dw_axi_dma_pm_ops = {1656SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)1657};16581659static const struct of_device_id dw_dma_of_id_table[] = {1660{1661.compatible = "snps,axi-dma-1.01a"1662}, {1663.compatible = "intel,kmb-axi-dma",1664.data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,1665}, {1666.compatible = "starfive,jh7110-axi-dma",1667.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),1668}, {1669.compatible = "starfive,jh8100-axi-dma",1670.data = (void *)AXI_DMA_FLAG_HAS_RESETS,1671},1672{}1673};1674MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);16751676static struct platform_driver dw_driver = {1677.probe = dw_probe,1678.remove = dw_remove,1679.driver = {1680.name = KBUILD_MODNAME,1681.of_match_table = dw_dma_of_id_table,1682.pm = &dw_axi_dma_pm_ops,1683},1684};1685module_platform_driver(dw_driver);16861687MODULE_LICENSE("GPL v2");1688MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");1689MODULE_AUTHOR("Eugeniy Paltsev <[email protected]>");169016911692