Path: blob/master/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
54063 views
// SPDX-License-Identifier: GPL-2.01// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)23/*4* Synopsys DesignWare AXI DMA Controller driver.5*6* Author: Eugeniy Paltsev <[email protected]>7*/89#include <linux/bitops.h>10#include <linux/delay.h>11#include <linux/device.h>12#include <linux/dmaengine.h>13#include <linux/dmapool.h>14#include <linux/dma-mapping.h>15#include <linux/err.h>16#include <linux/interrupt.h>17#include <linux/io.h>18#include <linux/iopoll.h>19#include <linux/io-64-nonatomic-lo-hi.h>20#include <linux/kernel.h>21#include <linux/module.h>22#include <linux/of.h>23#include <linux/of_dma.h>24#include <linux/platform_device.h>25#include <linux/pm_runtime.h>26#include <linux/property.h>27#include <linux/reset.h>28#include <linux/slab.h>29#include <linux/types.h>3031#include "dw-axi-dmac.h"32#include "../dmaengine.h"33#include "../virt-dma.h"3435/*36* The set of bus widths supported by the DMA controller. DW AXI DMAC supports37* master data bus width up to 512 bits (for both AXI master interfaces), but38* it depends on IP block configuration.39*/40#define AXI_DMA_BUSWIDTHS \41(DMA_SLAVE_BUSWIDTH_1_BYTE | \42DMA_SLAVE_BUSWIDTH_2_BYTES | \43DMA_SLAVE_BUSWIDTH_4_BYTES | \44DMA_SLAVE_BUSWIDTH_8_BYTES | \45DMA_SLAVE_BUSWIDTH_16_BYTES | \46DMA_SLAVE_BUSWIDTH_32_BYTES | \47DMA_SLAVE_BUSWIDTH_64_BYTES)4849#define AXI_DMA_FLAG_HAS_APB_REGS BIT(0)50#define AXI_DMA_FLAG_HAS_RESETS BIT(1)51#define AXI_DMA_FLAG_USE_CFG2 BIT(2)5253static inline void54axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)55{56iowrite32(val, chip->regs + reg);57}5859static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)60{61return ioread32(chip->regs + reg);62}6364static inline void65axi_dma_iowrite64(struct axi_dma_chip *chip, u32 reg, u64 val)66{67iowrite64(val, chip->regs + reg);68}6970static inline u64 axi_dma_ioread64(struct axi_dma_chip *chip, u32 reg)71{72return ioread64(chip->regs + reg);73}7475static inline void76axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)77{78iowrite32(val, chan->chan_regs + reg);79}8081static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)82{83return ioread32(chan->chan_regs + reg);84}8586static inline void87axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)88{89/*90* We split one 64 bit write for two 32 bit write as some HW doesn't91* support 64 bit access.92*/93iowrite32(lower_32_bits(val), chan->chan_regs + reg);94iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);95}9697static inline void axi_chan_config_write(struct axi_dma_chan *chan,98struct axi_dma_chan_config *config)99{100u32 cfg_lo, cfg_hi;101102cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |103config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);104if (chan->chip->dw->hdata->reg_map_8_channels &&105!chan->chip->dw->hdata->use_cfg2) {106cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |107config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |108config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |109config->src_per << CH_CFG_H_SRC_PER_POS |110config->dst_per << CH_CFG_H_DST_PER_POS |111config->prior << CH_CFG_H_PRIORITY_POS;112} else {113cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |114config->dst_per << CH_CFG2_L_DST_PER_POS;115cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |116config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |117config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |118config->prior << CH_CFG2_H_PRIORITY_POS;119}120axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);121axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);122}123124static inline void axi_dma_disable(struct axi_dma_chip *chip)125{126u32 val;127128val = axi_dma_ioread32(chip, DMAC_CFG);129val &= ~DMAC_EN_MASK;130axi_dma_iowrite32(chip, DMAC_CFG, val);131}132133static inline void axi_dma_enable(struct axi_dma_chip *chip)134{135u32 val;136137val = axi_dma_ioread32(chip, DMAC_CFG);138val |= DMAC_EN_MASK;139axi_dma_iowrite32(chip, DMAC_CFG, val);140}141142static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)143{144u32 val;145146val = axi_dma_ioread32(chip, DMAC_CFG);147val &= ~INT_EN_MASK;148axi_dma_iowrite32(chip, DMAC_CFG, val);149}150151static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)152{153u32 val;154155val = axi_dma_ioread32(chip, DMAC_CFG);156val |= INT_EN_MASK;157axi_dma_iowrite32(chip, DMAC_CFG, val);158}159160static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)161{162u32 val;163164if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {165axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);166} else {167val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);168val &= ~irq_mask;169axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);170}171}172173static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)174{175axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);176}177178static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)179{180axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);181}182183static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)184{185axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);186}187188static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)189{190return axi_chan_ioread32(chan, CH_INTSTATUS);191}192193static inline void axi_chan_disable(struct axi_dma_chan *chan)194{195u64 val;196197if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {198val = axi_dma_ioread64(chan->chip, DMAC_CHEN);199if (chan->id >= DMAC_CHAN_16) {200val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)201<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT));202val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)203<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);204} else {205val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);206val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;207}208axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);209} else {210val = axi_dma_ioread32(chan->chip, DMAC_CHEN);211val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);212if (chan->chip->dw->hdata->reg_map_8_channels)213val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;214else215val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;216axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);217}218}219220static inline void axi_chan_enable(struct axi_dma_chan *chan)221{222u64 val;223224if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {225val = axi_dma_ioread64(chan->chip, DMAC_CHEN);226if (chan->id >= DMAC_CHAN_16) {227val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)228<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |229(u64)(BIT(chan->id) >> DMAC_CHAN_16)230<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);231} else {232val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |233BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;234}235axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);236} else {237val = axi_dma_ioread32(chan->chip, DMAC_CHEN);238if (chan->chip->dw->hdata->reg_map_8_channels) {239val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |240BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;241} else {242val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |243BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;244}245axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);246}247}248249static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)250{251u64 val;252253if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16)254val = axi_dma_ioread64(chan->chip, DMAC_CHEN);255else256val = axi_dma_ioread32(chan->chip, DMAC_CHEN);257258if (chan->id >= DMAC_CHAN_16)259return !!(val & ((u64)(BIT(chan->id) >> DMAC_CHAN_16) << DMAC_CHAN_BLOCK_SHIFT));260else261return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));262}263264static void axi_dma_hw_init(struct axi_dma_chip *chip)265{266int ret;267u32 i;268269for (i = 0; i < chip->dw->hdata->nr_channels; i++) {270axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);271axi_chan_disable(&chip->dw->chan[i]);272}273ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));274if (ret)275dev_warn(chip->dev, "Unable to set coherent mask\n");276}277278static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,279dma_addr_t dst, size_t len)280{281u32 max_width = chan->chip->dw->hdata->m_data_width;282283return __ffs(src | dst | len | BIT(max_width));284}285286static inline const char *axi_chan_name(struct axi_dma_chan *chan)287{288return dma_chan_name(&chan->vc.chan);289}290291static struct axi_dma_desc *axi_desc_alloc(u32 num)292{293struct axi_dma_desc *desc;294295desc = kzalloc(sizeof(*desc), GFP_NOWAIT);296if (!desc)297return NULL;298299desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);300if (!desc->hw_desc) {301kfree(desc);302return NULL;303}304desc->nr_hw_descs = num;305306return desc;307}308309static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,310dma_addr_t *addr)311{312struct axi_dma_lli *lli;313dma_addr_t phys;314315lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);316if (unlikely(!lli)) {317dev_err(chan2dev(chan), "%s: not enough descriptors available\n",318axi_chan_name(chan));319return NULL;320}321322atomic_inc(&chan->descs_allocated);323*addr = phys;324325return lli;326}327328static void axi_desc_put(struct axi_dma_desc *desc)329{330struct axi_dma_chan *chan = desc->chan;331int count = desc->nr_hw_descs;332struct axi_dma_hw_desc *hw_desc;333int descs_put;334335for (descs_put = 0; descs_put < count; descs_put++) {336hw_desc = &desc->hw_desc[descs_put];337dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);338}339340kfree(desc->hw_desc);341kfree(desc);342atomic_sub(descs_put, &chan->descs_allocated);343dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",344axi_chan_name(chan), descs_put,345atomic_read(&chan->descs_allocated));346}347348static void vchan_desc_put(struct virt_dma_desc *vdesc)349{350axi_desc_put(vd_to_axi_desc(vdesc));351}352353static enum dma_status354dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,355struct dma_tx_state *txstate)356{357struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);358struct virt_dma_desc *vdesc;359enum dma_status status;360u32 completed_length;361unsigned long flags;362u32 completed_blocks;363size_t bytes = 0;364u32 length;365u32 len;366367status = dma_cookie_status(dchan, cookie, txstate);368if (status == DMA_COMPLETE || !txstate)369return status;370371spin_lock_irqsave(&chan->vc.lock, flags);372373vdesc = vchan_find_desc(&chan->vc, cookie);374if (vdesc) {375length = vd_to_axi_desc(vdesc)->length;376completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;377len = vd_to_axi_desc(vdesc)->hw_desc[0].len;378completed_length = completed_blocks * len;379bytes = length - completed_length;380}381382spin_unlock_irqrestore(&chan->vc.lock, flags);383dma_set_residue(txstate, bytes);384385return status;386}387388static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)389{390desc->lli->llp = cpu_to_le64(adr);391}392393static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)394{395axi_chan_iowrite64(chan, CH_LLP, adr);396}397398static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)399{400u32 offset = DMAC_APB_BYTE_WR_CH_EN;401u32 reg_width, val;402403if (!chan->chip->apb_regs) {404dev_dbg(chan->chip->dev, "apb_regs not initialized\n");405return;406}407408reg_width = __ffs(chan->config.dst_addr_width);409if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)410offset = DMAC_APB_HALFWORD_WR_CH_EN;411412val = ioread32(chan->chip->apb_regs + offset);413414if (set)415val |= BIT(chan->id);416else417val &= ~BIT(chan->id);418419iowrite32(val, chan->chip->apb_regs + offset);420}421/* Called in chan locked context */422static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,423struct axi_dma_desc *first)424{425u32 priority = chan->chip->dw->hdata->priority[chan->id];426struct axi_dma_chan_config config = {};427u32 irq_mask;428u8 lms = 0; /* Select AXI0 master for LLI fetching */429430if (unlikely(axi_chan_is_hw_enable(chan))) {431dev_err(chan2dev(chan), "%s is non-idle!\n",432axi_chan_name(chan));433434return;435}436437axi_dma_enable(chan->chip);438439config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;440config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;441config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;442config.prior = priority;443config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;444config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;445switch (chan->direction) {446case DMA_MEM_TO_DEV:447dw_axi_dma_set_byte_halfword(chan, true);448config.tt_fc = chan->config.device_fc ?449DWAXIDMAC_TT_FC_MEM_TO_PER_DST :450DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;451if (chan->chip->apb_regs)452config.dst_per = chan->id;453else454config.dst_per = chan->hw_handshake_num;455break;456case DMA_DEV_TO_MEM:457config.tt_fc = chan->config.device_fc ?458DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :459DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;460if (chan->chip->apb_regs)461config.src_per = chan->id;462else463config.src_per = chan->hw_handshake_num;464break;465default:466break;467}468axi_chan_config_write(chan, &config);469470write_chan_llp(chan, first->hw_desc[0].llp | lms);471472irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;473axi_chan_irq_sig_set(chan, irq_mask);474475/* Generate 'suspend' status but don't generate interrupt */476irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;477axi_chan_irq_set(chan, irq_mask);478479axi_chan_enable(chan);480}481482static void axi_chan_start_first_queued(struct axi_dma_chan *chan)483{484struct axi_dma_desc *desc;485struct virt_dma_desc *vd;486487vd = vchan_next_desc(&chan->vc);488if (!vd)489return;490491desc = vd_to_axi_desc(vd);492dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),493vd->tx.cookie);494axi_chan_block_xfer_start(chan, desc);495}496497static void dma_chan_issue_pending(struct dma_chan *dchan)498{499struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);500unsigned long flags;501502spin_lock_irqsave(&chan->vc.lock, flags);503if (vchan_issue_pending(&chan->vc))504axi_chan_start_first_queued(chan);505spin_unlock_irqrestore(&chan->vc.lock, flags);506}507508static void dw_axi_dma_synchronize(struct dma_chan *dchan)509{510struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);511512vchan_synchronize(&chan->vc);513}514515static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)516{517struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);518519/* ASSERT: channel is idle */520if (axi_chan_is_hw_enable(chan)) {521dev_err(chan2dev(chan), "%s is non-idle!\n",522axi_chan_name(chan));523return -EBUSY;524}525526/* LLI address must be aligned to a 64-byte boundary */527chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),528chan->chip->dev,529sizeof(struct axi_dma_lli),53064, 0);531if (!chan->desc_pool) {532dev_err(chan2dev(chan), "No memory for descriptors\n");533return -ENOMEM;534}535dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));536537pm_runtime_get(chan->chip->dev);538539return 0;540}541542static void dma_chan_free_chan_resources(struct dma_chan *dchan)543{544struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);545546/* ASSERT: channel is idle */547if (axi_chan_is_hw_enable(chan))548dev_err(dchan2dev(dchan), "%s is non-idle!\n",549axi_chan_name(chan));550551axi_chan_disable(chan);552axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);553554vchan_free_chan_resources(&chan->vc);555556dma_pool_destroy(chan->desc_pool);557chan->desc_pool = NULL;558dev_vdbg(dchan2dev(dchan),559"%s: free resources, descriptor still allocated: %u\n",560axi_chan_name(chan), atomic_read(&chan->descs_allocated));561562pm_runtime_put(chan->chip->dev);563}564565static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)566{567struct axi_dma_chip *chip = chan->chip;568unsigned long reg_value, val;569570if (!chip->apb_regs) {571dev_err(chip->dev, "apb_regs not initialized\n");572return;573}574575/*576* An unused DMA channel has a default value of 0x3F.577* Lock the DMA channel by assign a handshake number to the channel.578* Unlock the DMA channel by assign 0x3F to the channel.579*/580if (set)581val = chan->hw_handshake_num;582else583val = UNUSED_CHANNEL;584585reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);586587/* Channel is already allocated, set handshake as per channel ID */588/* 64 bit write should handle for 8 channels */589590reg_value &= ~(DMA_APB_HS_SEL_MASK <<591(chan->id * DMA_APB_HS_SEL_BIT_SIZE));592reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));593lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);594595return;596}597598/*599* If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI600* as 1, it understands that the current block is the final block in the601* transfer and completes the DMA transfer operation at the end of current602* block transfer.603*/604static void set_desc_last(struct axi_dma_hw_desc *desc)605{606u32 val;607608val = le32_to_cpu(desc->lli->ctl_hi);609val |= CH_CTL_H_LLI_LAST;610desc->lli->ctl_hi = cpu_to_le32(val);611}612613static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)614{615desc->lli->sar = cpu_to_le64(adr);616}617618static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)619{620desc->lli->dar = cpu_to_le64(adr);621}622623static void set_desc_src_master(struct axi_dma_hw_desc *desc)624{625u32 val;626627/* Select AXI0 for source master */628val = le32_to_cpu(desc->lli->ctl_lo);629val &= ~CH_CTL_L_SRC_MAST;630desc->lli->ctl_lo = cpu_to_le32(val);631}632633static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,634struct axi_dma_desc *desc)635{636u32 val;637638/* Select AXI1 for source master if available */639val = le32_to_cpu(hw_desc->lli->ctl_lo);640if (desc->chan->chip->dw->hdata->nr_masters > 1)641val |= CH_CTL_L_DST_MAST;642else643val &= ~CH_CTL_L_DST_MAST;644645hw_desc->lli->ctl_lo = cpu_to_le32(val);646}647648static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,649struct axi_dma_hw_desc *hw_desc,650dma_addr_t mem_addr, size_t len)651{652unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);653unsigned int reg_width;654unsigned int mem_width;655dma_addr_t device_addr;656size_t axi_block_ts;657size_t block_ts;658u32 ctllo, ctlhi;659u32 burst_len;660661axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];662663mem_width = __ffs(data_width | mem_addr | len);664if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)665mem_width = DWAXIDMAC_TRANS_WIDTH_32;666667if (!IS_ALIGNED(mem_addr, 4)) {668dev_err(chan->chip->dev, "invalid buffer alignment\n");669return -EINVAL;670}671672switch (chan->direction) {673case DMA_MEM_TO_DEV:674reg_width = __ffs(chan->config.dst_addr_width);675device_addr = chan->config.dst_addr;676ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |677mem_width << CH_CTL_L_SRC_WIDTH_POS |678DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |679DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;680block_ts = len >> mem_width;681break;682case DMA_DEV_TO_MEM:683reg_width = __ffs(chan->config.src_addr_width);684device_addr = chan->config.src_addr;685ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |686mem_width << CH_CTL_L_DST_WIDTH_POS |687DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |688DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;689block_ts = len >> reg_width;690break;691default:692return -EINVAL;693}694695if (block_ts > axi_block_ts)696return -EINVAL;697698hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);699if (unlikely(!hw_desc->lli))700return -ENOMEM;701702ctlhi = CH_CTL_H_LLI_VALID;703704if (chan->chip->dw->hdata->restrict_axi_burst_len) {705burst_len = chan->chip->dw->hdata->axi_rw_burst_len;706ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |707burst_len << CH_CTL_H_ARLEN_POS |708burst_len << CH_CTL_H_AWLEN_POS;709}710711hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);712713if (chan->direction == DMA_MEM_TO_DEV) {714write_desc_sar(hw_desc, mem_addr);715write_desc_dar(hw_desc, device_addr);716} else {717write_desc_sar(hw_desc, device_addr);718write_desc_dar(hw_desc, mem_addr);719}720721hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);722723ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |724DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;725hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);726727set_desc_src_master(hw_desc);728729hw_desc->len = len;730return 0;731}732733static size_t calculate_block_len(struct axi_dma_chan *chan,734dma_addr_t dma_addr, size_t buf_len,735enum dma_transfer_direction direction)736{737u32 data_width, reg_width, mem_width;738size_t axi_block_ts, block_len;739740axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];741742switch (direction) {743case DMA_MEM_TO_DEV:744data_width = BIT(chan->chip->dw->hdata->m_data_width);745mem_width = __ffs(data_width | dma_addr | buf_len);746if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)747mem_width = DWAXIDMAC_TRANS_WIDTH_32;748749block_len = axi_block_ts << mem_width;750break;751case DMA_DEV_TO_MEM:752reg_width = __ffs(chan->config.src_addr_width);753block_len = axi_block_ts << reg_width;754break;755default:756block_len = 0;757}758759return block_len;760}761762static struct dma_async_tx_descriptor *763dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,764size_t buf_len, size_t period_len,765enum dma_transfer_direction direction,766unsigned long flags)767{768struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);769struct axi_dma_hw_desc *hw_desc = NULL;770struct axi_dma_desc *desc = NULL;771dma_addr_t src_addr = dma_addr;772u32 num_periods, num_segments;773size_t axi_block_len;774u32 total_segments;775u32 segment_len;776unsigned int i;777int status;778u64 llp = 0;779u8 lms = 0; /* Select AXI0 master for LLI fetching */780781num_periods = buf_len / period_len;782783axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);784if (axi_block_len == 0)785return NULL;786787num_segments = DIV_ROUND_UP(period_len, axi_block_len);788segment_len = DIV_ROUND_UP(period_len, num_segments);789790total_segments = num_periods * num_segments;791792desc = axi_desc_alloc(total_segments);793if (unlikely(!desc))794goto err_desc_get;795796chan->direction = direction;797desc->chan = chan;798chan->cyclic = true;799desc->length = 0;800desc->period_len = period_len;801802for (i = 0; i < total_segments; i++) {803hw_desc = &desc->hw_desc[i];804805status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,806segment_len);807if (status < 0)808goto err_desc_get;809810desc->length += hw_desc->len;811/* Set end-of-link to the linked descriptor, so that cyclic812* callback function can be triggered during interrupt.813*/814set_desc_last(hw_desc);815816src_addr += segment_len;817}818819llp = desc->hw_desc[0].llp;820821/* Managed transfer list */822do {823hw_desc = &desc->hw_desc[--total_segments];824write_desc_llp(hw_desc, llp | lms);825llp = hw_desc->llp;826} while (total_segments);827828dw_axi_dma_set_hw_channel(chan, true);829830return vchan_tx_prep(&chan->vc, &desc->vd, flags);831832err_desc_get:833if (desc)834axi_desc_put(desc);835836return NULL;837}838839static struct dma_async_tx_descriptor *840dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,841unsigned int sg_len,842enum dma_transfer_direction direction,843unsigned long flags, void *context)844{845struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);846struct axi_dma_hw_desc *hw_desc = NULL;847struct axi_dma_desc *desc = NULL;848u32 num_segments, segment_len;849unsigned int loop = 0;850struct scatterlist *sg;851size_t axi_block_len;852u32 len, num_sgs;853unsigned int i;854dma_addr_t mem;855int status;856u64 llp = 0;857u8 lms = 0; /* Select AXI0 master for LLI fetching */858859if (unlikely(!is_slave_direction(direction) || !sg_len))860return NULL;861862mem = sg_dma_address(sgl);863len = sg_dma_len(sgl);864865axi_block_len = calculate_block_len(chan, mem, len, direction);866if (axi_block_len == 0)867return NULL;868869num_sgs = sg_nents_for_dma(sgl, sg_len, axi_block_len);870desc = axi_desc_alloc(num_sgs);871if (unlikely(!desc))872goto err_desc_get;873874desc->chan = chan;875desc->length = 0;876chan->direction = direction;877878for_each_sg(sgl, sg, sg_len, i) {879mem = sg_dma_address(sg);880len = sg_dma_len(sg);881num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);882segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);883884do {885hw_desc = &desc->hw_desc[loop++];886status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);887if (status < 0)888goto err_desc_get;889890desc->length += hw_desc->len;891len -= segment_len;892mem += segment_len;893} while (len >= segment_len);894}895896/* Set end-of-link to the last link descriptor of list */897set_desc_last(&desc->hw_desc[num_sgs - 1]);898899/* Managed transfer list */900do {901hw_desc = &desc->hw_desc[--num_sgs];902write_desc_llp(hw_desc, llp | lms);903llp = hw_desc->llp;904} while (num_sgs);905906dw_axi_dma_set_hw_channel(chan, true);907908return vchan_tx_prep(&chan->vc, &desc->vd, flags);909910err_desc_get:911if (desc)912axi_desc_put(desc);913914return NULL;915}916917static struct dma_async_tx_descriptor *918dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,919dma_addr_t src_adr, size_t len, unsigned long flags)920{921struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);922size_t block_ts, max_block_ts, xfer_len;923struct axi_dma_hw_desc *hw_desc = NULL;924struct axi_dma_desc *desc = NULL;925u32 xfer_width, reg, num;926u64 llp = 0;927u8 lms = 0; /* Select AXI0 master for LLI fetching */928929dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",930axi_chan_name(chan), &src_adr, &dst_adr, len, flags);931932max_block_ts = chan->chip->dw->hdata->block_size[chan->id];933xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);934num = DIV_ROUND_UP(len, max_block_ts << xfer_width);935desc = axi_desc_alloc(num);936if (unlikely(!desc))937goto err_desc_get;938939desc->chan = chan;940num = 0;941desc->length = 0;942while (len) {943xfer_len = len;944945hw_desc = &desc->hw_desc[num];946/*947* Take care for the alignment.948* Actually source and destination widths can be different, but949* make them same to be simpler.950*/951xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);952953/*954* block_ts indicates the total number of data of width955* to be transferred in a DMA block transfer.956* BLOCK_TS register should be set to block_ts - 1957*/958block_ts = xfer_len >> xfer_width;959if (block_ts > max_block_ts) {960block_ts = max_block_ts;961xfer_len = max_block_ts << xfer_width;962}963964hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);965if (unlikely(!hw_desc->lli))966goto err_desc_get;967968write_desc_sar(hw_desc, src_adr);969write_desc_dar(hw_desc, dst_adr);970hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);971972reg = CH_CTL_H_LLI_VALID;973if (chan->chip->dw->hdata->restrict_axi_burst_len) {974u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;975976reg |= (CH_CTL_H_ARLEN_EN |977burst_len << CH_CTL_H_ARLEN_POS |978CH_CTL_H_AWLEN_EN |979burst_len << CH_CTL_H_AWLEN_POS);980}981hw_desc->lli->ctl_hi = cpu_to_le32(reg);982983reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |984DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |985xfer_width << CH_CTL_L_DST_WIDTH_POS |986xfer_width << CH_CTL_L_SRC_WIDTH_POS |987DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |988DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);989hw_desc->lli->ctl_lo = cpu_to_le32(reg);990991set_desc_src_master(hw_desc);992set_desc_dest_master(hw_desc, desc);993994hw_desc->len = xfer_len;995desc->length += hw_desc->len;996/* update the length and addresses for the next loop cycle */997len -= xfer_len;998dst_adr += xfer_len;999src_adr += xfer_len;1000num++;1001}10021003/* Set end-of-link to the last link descriptor of list */1004set_desc_last(&desc->hw_desc[num - 1]);1005/* Managed transfer list */1006do {1007hw_desc = &desc->hw_desc[--num];1008write_desc_llp(hw_desc, llp | lms);1009llp = hw_desc->llp;1010} while (num);10111012return vchan_tx_prep(&chan->vc, &desc->vd, flags);10131014err_desc_get:1015if (desc)1016axi_desc_put(desc);1017return NULL;1018}10191020static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,1021struct dma_slave_config *config)1022{1023struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);10241025memcpy(&chan->config, config, sizeof(*config));10261027return 0;1028}10291030static void axi_chan_dump_lli(struct axi_dma_chan *chan,1031struct axi_dma_hw_desc *desc)1032{1033if (!desc->lli) {1034dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");1035return;1036}10371038dev_err(dchan2dev(&chan->vc.chan),1039"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",1040le64_to_cpu(desc->lli->sar),1041le64_to_cpu(desc->lli->dar),1042le64_to_cpu(desc->lli->llp),1043le32_to_cpu(desc->lli->block_ts_lo),1044le32_to_cpu(desc->lli->ctl_hi),1045le32_to_cpu(desc->lli->ctl_lo));1046}10471048static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,1049struct axi_dma_desc *desc_head)1050{1051int count = atomic_read(&chan->descs_allocated);1052int i;10531054for (i = 0; i < count; i++)1055axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);1056}10571058static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)1059{1060struct virt_dma_desc *vd;1061unsigned long flags;10621063spin_lock_irqsave(&chan->vc.lock, flags);10641065axi_chan_disable(chan);10661067/* The bad descriptor currently is in the head of vc list */1068vd = vchan_next_desc(&chan->vc);1069if (!vd) {1070dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",1071axi_chan_name(chan));1072goto out;1073}1074/* Remove the completed descriptor from issued list */1075list_del(&vd->node);10761077/* WARN about bad descriptor */1078dev_err(chan2dev(chan),1079"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",1080axi_chan_name(chan), vd->tx.cookie, status);1081axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));10821083vchan_cookie_complete(vd);10841085/* Try to restart the controller */1086axi_chan_start_first_queued(chan);10871088out:1089spin_unlock_irqrestore(&chan->vc.lock, flags);1090}10911092static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)1093{1094int count = atomic_read(&chan->descs_allocated);1095struct axi_dma_hw_desc *hw_desc;1096struct axi_dma_desc *desc;1097struct virt_dma_desc *vd;1098unsigned long flags;1099u64 llp;1100int i;11011102spin_lock_irqsave(&chan->vc.lock, flags);1103if (unlikely(axi_chan_is_hw_enable(chan))) {1104dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",1105axi_chan_name(chan));1106axi_chan_disable(chan);1107}11081109/* The completed descriptor currently is in the head of vc list */1110vd = vchan_next_desc(&chan->vc);1111if (!vd) {1112dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",1113axi_chan_name(chan));1114goto out;1115}11161117if (chan->cyclic) {1118desc = vd_to_axi_desc(vd);1119if (desc) {1120llp = lo_hi_readq(chan->chan_regs + CH_LLP);1121for (i = 0; i < count; i++) {1122hw_desc = &desc->hw_desc[i];1123if (hw_desc->llp == llp) {1124axi_chan_irq_clear(chan, hw_desc->lli->status_lo);1125hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;1126desc->completed_blocks = i;11271128if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)1129vchan_cyclic_callback(vd);1130break;1131}1132}11331134axi_chan_enable(chan);1135}1136} else {1137/* Remove the completed descriptor from issued list before completing */1138list_del(&vd->node);1139vchan_cookie_complete(vd);1140}11411142out:1143spin_unlock_irqrestore(&chan->vc.lock, flags);1144}11451146static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)1147{1148struct axi_dma_chip *chip = dev_id;1149struct dw_axi_dma *dw = chip->dw;1150struct axi_dma_chan *chan;11511152u32 status, i;11531154/* Disable DMAC interrupts. We'll enable them after processing channels */1155axi_dma_irq_disable(chip);11561157/* Poll, clear and process every channel interrupt status */1158for (i = 0; i < dw->hdata->nr_channels; i++) {1159chan = &dw->chan[i];1160status = axi_chan_irq_read(chan);1161axi_chan_irq_clear(chan, status);11621163dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",1164axi_chan_name(chan), i, status);11651166if (status & DWAXIDMAC_IRQ_ALL_ERR)1167axi_chan_handle_err(chan, status);1168else if (status & DWAXIDMAC_IRQ_DMA_TRF)1169axi_chan_block_xfer_complete(chan);1170}11711172/* Re-enable interrupts */1173axi_dma_irq_enable(chip);11741175return IRQ_HANDLED;1176}11771178static int dma_chan_terminate_all(struct dma_chan *dchan)1179{1180struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);1181u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;1182unsigned long flags;1183u32 val;1184int ret;1185LIST_HEAD(head);11861187axi_chan_disable(chan);11881189ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,1190!(val & chan_active), 1000, 50000);1191if (ret == -ETIMEDOUT)1192dev_warn(dchan2dev(dchan),1193"%s failed to stop\n", axi_chan_name(chan));11941195if (chan->direction != DMA_MEM_TO_MEM)1196dw_axi_dma_set_hw_channel(chan, false);1197if (chan->direction == DMA_MEM_TO_DEV)1198dw_axi_dma_set_byte_halfword(chan, false);11991200spin_lock_irqsave(&chan->vc.lock, flags);12011202vchan_get_all_descriptors(&chan->vc, &head);12031204chan->cyclic = false;1205spin_unlock_irqrestore(&chan->vc.lock, flags);12061207vchan_dma_desc_free_list(&chan->vc, &head);12081209dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));12101211return 0;1212}12131214static int dma_chan_pause(struct dma_chan *dchan)1215{1216struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);1217unsigned long flags;1218unsigned int timeout = 20; /* timeout iterations */1219u64 val;12201221spin_lock_irqsave(&chan->vc.lock, flags);12221223if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {1224val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);1225if (chan->id >= DMAC_CHAN_16) {1226val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)1227<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |1228(u64)(BIT(chan->id) >> DMAC_CHAN_16)1229<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);1230} else {1231val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |1232BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;1233}1234axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);1235} else {1236if (chan->chip->dw->hdata->reg_map_8_channels) {1237val = axi_dma_ioread32(chan->chip, DMAC_CHEN);1238val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |1239BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;1240axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);1241} else {1242val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);1243val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |1244BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;1245axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);1246}1247}12481249do {1250if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)1251break;12521253udelay(2);1254} while (--timeout);12551256axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);12571258chan->is_paused = true;12591260spin_unlock_irqrestore(&chan->vc.lock, flags);12611262return timeout ? 0 : -EAGAIN;1263}12641265/* Called in chan locked context */1266static inline void axi_chan_resume(struct axi_dma_chan *chan)1267{1268u64 val;12691270if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {1271val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);1272if (chan->id >= DMAC_CHAN_16) {1273val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)1274<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT));1275val |= ((u64)(BIT(chan->id) >> DMAC_CHAN_16)1276<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT));1277} else {1278val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);1279val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);1280}1281axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);1282} else {1283if (chan->chip->dw->hdata->reg_map_8_channels) {1284val = axi_dma_ioread32(chan->chip, DMAC_CHEN);1285val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);1286val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);1287axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);1288} else {1289val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);1290val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);1291val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);1292axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);1293}1294}12951296chan->is_paused = false;1297}12981299static int dma_chan_resume(struct dma_chan *dchan)1300{1301struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);1302unsigned long flags;13031304spin_lock_irqsave(&chan->vc.lock, flags);13051306if (chan->is_paused)1307axi_chan_resume(chan);13081309spin_unlock_irqrestore(&chan->vc.lock, flags);13101311return 0;1312}13131314static int axi_dma_suspend(struct axi_dma_chip *chip)1315{1316axi_dma_irq_disable(chip);1317axi_dma_disable(chip);13181319clk_disable_unprepare(chip->core_clk);1320clk_disable_unprepare(chip->cfgr_clk);13211322return 0;1323}13241325static int axi_dma_resume(struct axi_dma_chip *chip)1326{1327int ret;13281329ret = clk_prepare_enable(chip->cfgr_clk);1330if (ret < 0)1331return ret;13321333ret = clk_prepare_enable(chip->core_clk);1334if (ret < 0)1335return ret;13361337axi_dma_enable(chip);1338axi_dma_irq_enable(chip);13391340return 0;1341}13421343static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)1344{1345struct axi_dma_chip *chip = dev_get_drvdata(dev);13461347return axi_dma_suspend(chip);1348}13491350static int __maybe_unused axi_dma_runtime_resume(struct device *dev)1351{1352struct axi_dma_chip *chip = dev_get_drvdata(dev);13531354return axi_dma_resume(chip);1355}13561357static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,1358struct of_dma *ofdma)1359{1360struct dw_axi_dma *dw = ofdma->of_dma_data;1361struct axi_dma_chan *chan;1362struct dma_chan *dchan;13631364dchan = dma_get_any_slave_channel(&dw->dma);1365if (!dchan)1366return NULL;13671368chan = dchan_to_axi_dma_chan(dchan);1369chan->hw_handshake_num = dma_spec->args[0];1370return dchan;1371}13721373static int parse_device_properties(struct axi_dma_chip *chip)1374{1375struct device *dev = chip->dev;1376u32 tmp, carr[DMAC_MAX_CHANNELS];1377int ret;13781379ret = device_property_read_u32(dev, "dma-channels", &tmp);1380if (ret)1381return ret;1382if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)1383return -EINVAL;13841385chip->dw->hdata->nr_channels = tmp;1386if (tmp <= DMA_REG_MAP_CH_REF)1387chip->dw->hdata->reg_map_8_channels = true;13881389ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);1390if (ret)1391return ret;1392if (tmp == 0 || tmp > DMAC_MAX_MASTERS)1393return -EINVAL;13941395chip->dw->hdata->nr_masters = tmp;13961397ret = device_property_read_u32(dev, "snps,data-width", &tmp);1398if (ret)1399return ret;1400if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)1401return -EINVAL;14021403chip->dw->hdata->m_data_width = tmp;14041405ret = device_property_read_u32_array(dev, "snps,block-size", carr,1406chip->dw->hdata->nr_channels);1407if (ret)1408return ret;1409for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {1410if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)1411return -EINVAL;14121413chip->dw->hdata->block_size[tmp] = carr[tmp];1414}14151416ret = device_property_read_u32_array(dev, "snps,priority", carr,1417chip->dw->hdata->nr_channels);1418if (ret)1419return ret;1420/* Priority value must be programmed within [0:nr_channels-1] range */1421for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {1422if (carr[tmp] >= chip->dw->hdata->nr_channels)1423return -EINVAL;14241425chip->dw->hdata->priority[tmp] = carr[tmp];1426}14271428/* axi-max-burst-len is optional property */1429ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);1430if (!ret) {1431if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)1432return -EINVAL;1433if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)1434return -EINVAL;14351436chip->dw->hdata->restrict_axi_burst_len = true;1437chip->dw->hdata->axi_rw_burst_len = tmp;1438}14391440return 0;1441}14421443static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip)1444{1445int irq_count = platform_irq_count(pdev);1446int ret;14471448for (int i = 0; i < irq_count; i++) {1449chip->irq[i] = platform_get_irq(pdev, i);1450if (chip->irq[i] < 0)1451return chip->irq[i];1452ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt,1453IRQF_SHARED, KBUILD_MODNAME, chip);1454if (ret < 0)1455return ret;1456}14571458return 0;1459}14601461static int dw_probe(struct platform_device *pdev)1462{1463struct axi_dma_chip *chip;1464struct dw_axi_dma *dw;1465struct dw_axi_dma_hcfg *hdata;1466struct reset_control *resets;1467unsigned int flags;1468u32 i;1469int ret;14701471chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);1472if (!chip)1473return -ENOMEM;14741475dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);1476if (!dw)1477return -ENOMEM;14781479hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);1480if (!hdata)1481return -ENOMEM;14821483chip->dw = dw;1484chip->dev = &pdev->dev;1485chip->dw->hdata = hdata;14861487chip->regs = devm_platform_ioremap_resource(pdev, 0);1488if (IS_ERR(chip->regs))1489return PTR_ERR(chip->regs);14901491flags = (uintptr_t)of_device_get_match_data(&pdev->dev);1492if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {1493chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);1494if (IS_ERR(chip->apb_regs))1495return PTR_ERR(chip->apb_regs);1496}14971498if (flags & AXI_DMA_FLAG_HAS_RESETS) {1499resets = devm_reset_control_array_get_exclusive(&pdev->dev);1500if (IS_ERR(resets))1501return PTR_ERR(resets);15021503ret = reset_control_deassert(resets);1504if (ret)1505return ret;1506}15071508chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);15091510chip->core_clk = devm_clk_get(chip->dev, "core-clk");1511if (IS_ERR(chip->core_clk))1512return PTR_ERR(chip->core_clk);15131514chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");1515if (IS_ERR(chip->cfgr_clk))1516return PTR_ERR(chip->cfgr_clk);15171518ret = parse_device_properties(chip);1519if (ret)1520return ret;15211522dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,1523sizeof(*dw->chan), GFP_KERNEL);1524if (!dw->chan)1525return -ENOMEM;15261527ret = axi_req_irqs(pdev, chip);1528if (ret)1529return ret;15301531INIT_LIST_HEAD(&dw->dma.channels);1532for (i = 0; i < hdata->nr_channels; i++) {1533struct axi_dma_chan *chan = &dw->chan[i];15341535chan->chip = chip;1536chan->id = i;1537chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;1538atomic_set(&chan->descs_allocated, 0);15391540chan->vc.desc_free = vchan_desc_put;1541vchan_init(&chan->vc, &dw->dma);1542}15431544/* Set capabilities */1545dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);1546dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);1547dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);15481549/* DMA capabilities */1550dw->dma.max_burst = hdata->axi_rw_burst_len;1551dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;1552dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;1553dw->dma.directions = BIT(DMA_MEM_TO_MEM);1554dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);1555dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;15561557dw->dma.dev = chip->dev;1558dw->dma.device_tx_status = dma_chan_tx_status;1559dw->dma.device_issue_pending = dma_chan_issue_pending;1560dw->dma.device_terminate_all = dma_chan_terminate_all;1561dw->dma.device_pause = dma_chan_pause;1562dw->dma.device_resume = dma_chan_resume;15631564dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;1565dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;15661567dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;1568dw->dma.device_synchronize = dw_axi_dma_synchronize;1569dw->dma.device_config = dw_axi_dma_chan_slave_config;1570dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;1571dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;15721573/*1574* Synopsis DesignWare AxiDMA datasheet mentioned Maximum1575* supported blocks is 1024. Device register width is 4 bytes.1576* Therefore, set constraint to 1024 * 4.1577*/1578dw->dma.dev->dma_parms = &dw->dma_parms;1579dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);1580platform_set_drvdata(pdev, chip);15811582pm_runtime_enable(chip->dev);15831584/*1585* We can't just call pm_runtime_get here instead of1586* pm_runtime_get_noresume + axi_dma_resume because we need1587* driver to work also without Runtime PM.1588*/1589pm_runtime_get_noresume(chip->dev);1590ret = axi_dma_resume(chip);1591if (ret < 0)1592goto err_pm_disable;15931594axi_dma_hw_init(chip);15951596pm_runtime_put(chip->dev);15971598ret = dmaenginem_async_device_register(&dw->dma);1599if (ret)1600goto err_pm_disable;16011602/* Register with OF helpers for DMA lookups */1603ret = of_dma_controller_register(pdev->dev.of_node,1604dw_axi_dma_of_xlate, dw);1605if (ret < 0)1606dev_warn(&pdev->dev,1607"Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");16081609dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",1610dw->hdata->nr_channels);16111612return 0;16131614err_pm_disable:1615pm_runtime_disable(chip->dev);16161617return ret;1618}16191620static void dw_remove(struct platform_device *pdev)1621{1622struct axi_dma_chip *chip = platform_get_drvdata(pdev);1623struct dw_axi_dma *dw = chip->dw;1624struct axi_dma_chan *chan, *_chan;1625u32 i;16261627/* Enable clk before accessing to registers */1628clk_prepare_enable(chip->cfgr_clk);1629clk_prepare_enable(chip->core_clk);1630axi_dma_irq_disable(chip);1631for (i = 0; i < dw->hdata->nr_channels; i++) {1632axi_chan_disable(&chip->dw->chan[i]);1633axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);1634}1635axi_dma_disable(chip);16361637pm_runtime_disable(chip->dev);1638axi_dma_suspend(chip);16391640for (i = 0; i < DMAC_MAX_CHANNELS; i++)1641if (chip->irq[i] > 0)1642devm_free_irq(chip->dev, chip->irq[i], chip);16431644of_dma_controller_free(chip->dev->of_node);16451646list_for_each_entry_safe(chan, _chan, &dw->dma.channels,1647vc.chan.device_node) {1648list_del(&chan->vc.chan.device_node);1649tasklet_kill(&chan->vc.task);1650}1651}16521653static const struct dev_pm_ops dw_axi_dma_pm_ops = {1654SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)1655};16561657static const struct of_device_id dw_dma_of_id_table[] = {1658{1659.compatible = "snps,axi-dma-1.01a"1660}, {1661.compatible = "intel,kmb-axi-dma",1662.data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,1663}, {1664.compatible = "starfive,jh7110-axi-dma",1665.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),1666}, {1667.compatible = "starfive,jh8100-axi-dma",1668.data = (void *)AXI_DMA_FLAG_HAS_RESETS,1669},1670{}1671};1672MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);16731674static struct platform_driver dw_driver = {1675.probe = dw_probe,1676.remove = dw_remove,1677.driver = {1678.name = KBUILD_MODNAME,1679.of_match_table = dw_dma_of_id_table,1680.pm = &dw_axi_dma_pm_ops,1681},1682};1683module_platform_driver(dw_driver);16841685MODULE_LICENSE("GPL v2");1686MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");1687MODULE_AUTHOR("Eugeniy Paltsev <[email protected]>");168816891690