GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma/at_xdmac.c
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
4
*
5
* Copyright (C) 2014 Atmel Corporation
6
*
7
* Author: Ludovic Desroches <[email protected]>
8
*/
9
10
#include <asm/barrier.h>
11
#include <dt-bindings/dma/at91.h>
12
#include <linux/clk.h>
13
#include <linux/dmaengine.h>
14
#include <linux/dmapool.h>
15
#include <linux/interrupt.h>
16
#include <linux/irq.h>
17
#include <linux/kernel.h>
18
#include <linux/list.h>
19
#include <linux/module.h>
20
#include <linux/of_dma.h>
21
#include <linux/of_platform.h>
22
#include <linux/platform_device.h>
23
#include <linux/pm.h>
24
#include <linux/pm_runtime.h>
25
26
#include "dmaengine.h"
27
28
/* Global registers */
29
#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */
30
#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */
31
#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
32
#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
33
#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
34
#define AT_XDMAC_WRHP(i) (((i) & 0xF) << 4)
35
#define AT_XDMAC_WRMP(i) (((i) & 0xF) << 8)
36
#define AT_XDMAC_WRLP(i) (((i) & 0xF) << 12)
37
#define AT_XDMAC_RDHP(i) (((i) & 0xF) << 16)
38
#define AT_XDMAC_RDMP(i) (((i) & 0xF) << 20)
39
#define AT_XDMAC_RDLP(i) (((i) & 0xF) << 24)
40
#define AT_XDMAC_RDSG(i) (((i) & 0xF) << 28)
41
#define AT_XDMAC_GCFG_M2M (AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
42
#define AT_XDMAC_GCFG_P2M (AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
43
AT_XDMAC_WRHP(0x5))
44
#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
45
#define AT_XDMAC_PW0(i) (((i) & 0xF) << 0)
46
#define AT_XDMAC_PW1(i) (((i) & 0xF) << 4)
47
#define AT_XDMAC_PW2(i) (((i) & 0xF) << 8)
48
#define AT_XDMAC_PW3(i) (((i) & 0xF) << 12)
49
#define AT_XDMAC_GWAC_M2M 0
50
#define AT_XDMAC_GWAC_P2M (AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))
51
52
#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
53
#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
54
#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
55
#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */
56
#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
57
#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
58
#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
59
#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */
60
61
/* Channel-relative register offsets */
62
#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
63
#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */
64
#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */
65
#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */
66
#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */
67
#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */
68
#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */
69
#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */
70
#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
71
#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */
72
#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */
73
#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */
74
#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */
75
#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */
76
#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */
77
#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */
78
#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
79
#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
80
#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
81
#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
82
#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
83
#define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
84
#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
85
#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
86
#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
87
#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */
88
#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */
89
#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */
90
#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */
91
#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */
92
#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */
93
#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */
94
#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
95
#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
96
#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
97
#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
98
#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
99
#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
100
#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
101
#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
102
#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
103
#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27)
104
#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
105
#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
106
#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
107
#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
108
#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
109
#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
110
#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
111
#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
112
#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */
113
#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */
114
#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
115
#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
116
#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
117
#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
118
#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
119
#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
120
#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
121
#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
122
#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
123
#define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
124
#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
125
#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
126
#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
127
#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
128
#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
129
#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
130
#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
131
#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
132
#define AT_XDMAC_CC_DWIDTH_OFFSET 11
133
#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
134
#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
135
#define AT_XDMAC_CC_DWIDTH_BYTE 0x0
136
#define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
137
#define AT_XDMAC_CC_DWIDTH_WORD 0x2
138
#define AT_XDMAC_CC_DWIDTH_DWORD 0x3
139
#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
140
#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
141
#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
142
#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
143
#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
144
#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
145
#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
146
#define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
147
#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
148
#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
149
#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
150
#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
151
#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
152
#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
153
#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
154
#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */
155
#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
156
#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
157
#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
158
#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
159
#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
160
#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
161
#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
162
#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
163
#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
164
165
/* Microblock control members */
166
#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
167
#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
168
#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */
169
#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */
170
#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */
171
#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */
172
#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */
173
#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */
174
175
#define AT_XDMAC_MAX_CHAN 0x20
176
#define AT_XDMAC_MAX_CSIZE 16 /* 16 data items */
177
#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
178
#define AT_XDMAC_RESIDUE_MAX_RETRIES 5
179
180
#define AT_XDMAC_DMA_BUSWIDTHS\
181
(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
182
BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
183
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
184
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
185
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
186
187
enum atc_status {
188
AT_XDMAC_CHAN_IS_CYCLIC = 0,
189
AT_XDMAC_CHAN_IS_PAUSED,
190
AT_XDMAC_CHAN_IS_PAUSED_INTERNAL,
191
};
192
193
struct at_xdmac_layout {
194
/* Global Channel Read Suspend Register */
195
u8 grs;
196
/* Global Write Suspend Register */
197
u8 gws;
198
/* Global Channel Read Write Suspend Register */
199
u8 grws;
200
/* Global Channel Read Write Resume Register */
201
u8 grwr;
202
/* Global Channel Software Request Register */
203
u8 gswr;
204
/* Global channel Software Request Status Register */
205
u8 gsws;
206
/* Global Channel Software Flush Request Register */
207
u8 gswf;
208
/* Channel reg base */
209
u8 chan_cc_reg_base;
210
/* Whether the Source/Destination Interface must be specified */
211
bool sdif;
212
/* AXI queue priority configuration supported */
213
bool axi_config;
214
};
215
216
/* ----- Channels ----- */
217
struct at_xdmac_chan {
218
struct dma_chan chan;
219
void __iomem *ch_regs;
220
u32 mask; /* Channel Mask */
221
u32 cfg; /* Channel Configuration Register */
222
u8 perid; /* Peripheral ID */
223
u8 perif; /* Peripheral Interface */
224
u8 memif; /* Memory Interface */
225
u32 save_cc;
226
u32 save_cim;
227
u32 save_cnda;
228
u32 save_cndc;
229
u32 irq_status;
230
unsigned long status;
231
struct tasklet_struct tasklet;
232
struct dma_slave_config sconfig;
233
234
spinlock_t lock;
235
236
struct list_head xfers_list;
237
struct list_head free_descs_list;
238
};
239
240
241
/* ----- Controller ----- */
242
struct at_xdmac {
243
struct dma_device dma;
244
void __iomem *regs;
245
struct device *dev;
246
int irq;
247
struct clk *clk;
248
u32 save_gim;
249
u32 save_gs;
250
struct dma_pool *at_xdmac_desc_pool;
251
const struct at_xdmac_layout *layout;
252
struct at_xdmac_chan chan[];
253
};
254
255
256
/* ----- Descriptors ----- */
257
258
/* Linked List Descriptor */
259
struct at_xdmac_lld {
260
u32 mbr_nda; /* Next Descriptor Member */
261
u32 mbr_ubc; /* Microblock Control Member */
262
u32 mbr_sa; /* Source Address Member */
263
u32 mbr_da; /* Destination Address Member */
264
u32 mbr_cfg; /* Configuration Register */
265
u32 mbr_bc; /* Block Control Register */
266
u32 mbr_ds; /* Data Stride Register */
267
u32 mbr_sus; /* Source Microblock Stride Register */
268
u32 mbr_dus; /* Destination Microblock Stride Register */
269
};
270
271
/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
272
struct at_xdmac_desc {
273
struct at_xdmac_lld lld;
274
enum dma_transfer_direction direction;
275
struct dma_async_tx_descriptor tx_dma_desc;
276
struct list_head desc_node;
277
/* Following members are only used by the first descriptor */
278
bool active_xfer;
279
unsigned int xfer_size;
280
struct list_head descs_list;
281
struct list_head xfer_node;
282
} __aligned(sizeof(u64));
283
284
static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
285
.grs = 0x28,
286
.gws = 0x2C,
287
.grws = 0x30,
288
.grwr = 0x34,
289
.gswr = 0x38,
290
.gsws = 0x3C,
291
.gswf = 0x40,
292
.chan_cc_reg_base = 0x50,
293
.sdif = true,
294
.axi_config = false,
295
};
296
297
static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
298
.grs = 0x30,
299
.gws = 0x38,
300
.grws = 0x40,
301
.grwr = 0x44,
302
.gswr = 0x48,
303
.gsws = 0x4C,
304
.gswf = 0x50,
305
.chan_cc_reg_base = 0x60,
306
.sdif = false,
307
.axi_config = true,
308
};
309
310
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
311
{
312
return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
313
}
314
315
#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
316
#define at_xdmac_write(atxdmac, reg, value) \
317
writel_relaxed((value), (atxdmac)->regs + (reg))
318
319
#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
320
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
321
322
static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
323
{
324
return container_of(dchan, struct at_xdmac_chan, chan);
325
}
326
327
static struct device *chan2dev(struct dma_chan *chan)
328
{
329
return &chan->dev->device;
330
}
331
332
static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
333
{
334
return container_of(ddev, struct at_xdmac, dma);
335
}
336
337
static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
338
{
339
return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
340
}
341
342
static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
343
{
344
return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
345
}
346
347
static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
348
{
349
return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
350
}
351
352
static inline int at_xdmac_chan_is_paused_internal(struct at_xdmac_chan *atchan)
353
{
354
return test_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
355
}
356
357
static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
358
{
359
return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
360
}
361
362
static inline u8 at_xdmac_get_dwidth(u32 cfg)
363
{
364
return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
365
};
366
367
static unsigned int init_nr_desc_per_channel = 64;
368
module_param(init_nr_desc_per_channel, uint, 0644);
369
MODULE_PARM_DESC(init_nr_desc_per_channel,
370
"initial descriptors per channel (default: 64)");
371
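/*
 * Note: as a module parameter, this can typically be tuned at boot time,
 * e.g. "at_xdmac.init_nr_desc_per_channel=128" on the kernel command line
 * (assuming the default module name derived from this file).
 */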
372
373
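/*
 * Drop the runtime PM reference held for each active transfer queued on
 * this channel.
 */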
static void at_xdmac_runtime_suspend_descriptors(struct at_xdmac_chan *atchan)
374
{
375
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
376
struct at_xdmac_desc *desc, *_desc;
377
378
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
379
if (!desc->active_xfer)
380
continue;
381
382
pm_runtime_mark_last_busy(atxdmac->dev);
383
pm_runtime_put_autosuspend(atxdmac->dev);
384
}
385
}
386
387
static int at_xdmac_runtime_resume_descriptors(struct at_xdmac_chan *atchan)
388
{
389
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
390
struct at_xdmac_desc *desc, *_desc;
391
int ret;
392
393
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
394
if (!desc->active_xfer)
395
continue;
396
397
ret = pm_runtime_resume_and_get(atxdmac->dev);
398
if (ret < 0)
399
return ret;
400
}
401
402
return 0;
403
}
404
405
static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
406
{
407
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
408
int ret;
409
410
ret = pm_runtime_resume_and_get(atxdmac->dev);
411
if (ret < 0)
412
return false;
413
414
ret = !!(at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask);
415
416
pm_runtime_mark_last_busy(atxdmac->dev);
417
pm_runtime_put_autosuspend(atxdmac->dev);
418
419
return ret;
420
}
421
422
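/*
 * Disable all channels and mask all global interrupts. When
 * suspend_descriptors is true, also drop the runtime PM references held
 * by active transfers.
 */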
static void at_xdmac_off(struct at_xdmac *atxdmac, bool suspend_descriptors)
423
{
424
struct dma_chan *chan, *_chan;
425
struct at_xdmac_chan *atchan;
426
int ret;
427
428
ret = pm_runtime_resume_and_get(atxdmac->dev);
429
if (ret < 0)
430
return;
431
432
at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
433
434
/* Wait until all channels are disabled. */
435
while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
436
cpu_relax();
437
438
at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
439
440
/* Decrement runtime PM ref counter for each active descriptor. */
441
if (!list_empty(&atxdmac->dma.channels) && suspend_descriptors) {
442
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels,
443
device_node) {
444
atchan = to_at_xdmac_chan(chan);
445
at_xdmac_runtime_suspend_descriptors(atchan);
446
}
447
}
448
449
pm_runtime_mark_last_busy(atxdmac->dev);
450
pm_runtime_put_autosuspend(atxdmac->dev);
451
}
452
453
/* Call with lock held. */
454
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
455
struct at_xdmac_desc *first)
456
{
457
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
458
u32 reg;
459
int ret;
460
461
ret = pm_runtime_resume_and_get(atxdmac->dev);
462
if (ret < 0)
463
return;
464
465
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
466
467
/* Mark the transfer as active so we don't try to start it again. */
468
first->active_xfer = true;
469
470
/* Tell xdmac where to get the first descriptor. */
471
reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
472
if (atxdmac->layout->sdif)
473
reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);
474
475
at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
476
477
/*
478
* When doing a non-cyclic transfer we need to use the next
479
* descriptor view 2 since some fields of the configuration register
480
* depend on transfer size and src/dest addresses.
481
*/
482
if (at_xdmac_chan_is_cyclic(atchan))
483
reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
484
else if ((first->lld.mbr_ubc &
485
AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
486
reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
487
else
488
reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
489
/*
490
* Even if the register will be updated from the configuration in the
491
* descriptor when using view 2 or higher, the PROT bit won't be set
492
* properly. This bit can be modified only by using the channel
493
* configuration register.
494
*/
495
at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
496
497
reg |= AT_XDMAC_CNDC_NDDUP
498
| AT_XDMAC_CNDC_NDSUP
499
| AT_XDMAC_CNDC_NDE;
500
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
501
502
dev_vdbg(chan2dev(&atchan->chan),
503
"%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
504
__func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
505
at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
506
at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
507
at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
508
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
509
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
510
511
at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
512
reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
513
/*
514
* Request Overflow Error is only for peripheral synchronized transfers
515
*/
516
if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
517
reg |= AT_XDMAC_CIE_ROIE;
518
519
/*
520
* There is no end of list when doing cyclic DMA, so we need to get
521
* an interrupt after each period.
522
*/
523
if (at_xdmac_chan_is_cyclic(atchan))
524
at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
525
reg | AT_XDMAC_CIE_BIE);
526
else
527
at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
528
reg | AT_XDMAC_CIE_LIE);
529
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
530
dev_vdbg(chan2dev(&atchan->chan),
531
"%s: enable channel (0x%08x)\n", __func__, atchan->mask);
532
wmb();
533
at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
534
535
dev_vdbg(chan2dev(&atchan->chan),
536
"%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
537
__func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
538
at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
539
at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
540
at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
541
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
542
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
543
}
544
545
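/*
 * Assign a cookie to the transaction and queue its descriptor on the
 * channel's transfer list.
 */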
static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
546
{
547
struct at_xdmac_desc *desc = txd_to_at_desc(tx);
548
struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
549
dma_cookie_t cookie;
550
unsigned long irqflags;
551
552
spin_lock_irqsave(&atchan->lock, irqflags);
553
cookie = dma_cookie_assign(tx);
554
555
list_add_tail(&desc->xfer_node, &atchan->xfers_list);
556
spin_unlock_irqrestore(&atchan->lock, irqflags);
557
558
dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
559
__func__, atchan, desc);
560
561
return cookie;
562
}
563
564
static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
565
gfp_t gfp_flags)
566
{
567
struct at_xdmac_desc *desc;
568
struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
569
dma_addr_t phys;
570
571
desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
572
if (desc) {
573
INIT_LIST_HEAD(&desc->descs_list);
574
dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
575
desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
576
desc->tx_dma_desc.phys = phys;
577
}
578
579
return desc;
580
}
581
582
static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
583
{
584
memset(&desc->lld, 0, sizeof(desc->lld));
585
INIT_LIST_HEAD(&desc->descs_list);
586
desc->direction = DMA_TRANS_NONE;
587
desc->xfer_size = 0;
588
desc->active_xfer = false;
589
}
590
591
/* Call must be protected by lock. */
592
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
593
{
594
struct at_xdmac_desc *desc;
595
596
if (list_empty(&atchan->free_descs_list)) {
597
desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
598
} else {
599
desc = list_first_entry(&atchan->free_descs_list,
600
struct at_xdmac_desc, desc_node);
601
list_del(&desc->desc_node);
602
at_xdmac_init_used_desc(desc);
603
}
604
605
return desc;
606
}
607
608
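/*
 * Chain desc after prev in the linked list: point prev's next-descriptor
 * address at desc and enable next-descriptor fetch.
 */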
static void at_xdmac_queue_desc(struct dma_chan *chan,
609
struct at_xdmac_desc *prev,
610
struct at_xdmac_desc *desc)
611
{
612
if (!prev || !desc)
613
return;
614
615
prev->lld.mbr_nda = desc->tx_dma_desc.phys;
616
prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;
617
618
dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
619
__func__, prev, &prev->lld.mbr_nda);
620
}
621
622
static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
623
struct at_xdmac_desc *desc)
624
{
625
if (!desc)
626
return;
627
628
desc->lld.mbr_bc++;
629
630
dev_dbg(chan2dev(chan),
631
"%s: incrementing the block count of the desc 0x%p\n",
632
__func__, desc);
633
}
634
635
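/*
 * Device tree translation callback: grab any free channel and decode the
 * memory interface, peripheral interface and peripheral ID from the
 * single DT cell.
 */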
static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
636
struct of_dma *of_dma)
637
{
638
struct at_xdmac *atxdmac = of_dma->of_dma_data;
639
struct at_xdmac_chan *atchan;
640
struct dma_chan *chan;
641
struct device *dev = atxdmac->dma.dev;
642
643
if (dma_spec->args_count != 1) {
644
dev_err(dev, "dma phandler args: bad number of args\n");
645
return NULL;
646
}
647
648
chan = dma_get_any_slave_channel(&atxdmac->dma);
649
if (!chan) {
650
dev_err(dev, "can't get a dma channel\n");
651
return NULL;
652
}
653
654
atchan = to_at_xdmac_chan(chan);
655
atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
656
atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
657
atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
658
dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
659
atchan->memif, atchan->perif, atchan->perid);
660
661
return chan;
662
}
663
664
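/*
 * Build the channel configuration (CC) value from the slave config for
 * the given transfer direction.
 */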
static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
665
enum dma_transfer_direction direction)
666
{
667
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
668
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
669
int csize, dwidth;
670
671
if (direction == DMA_DEV_TO_MEM) {
672
atchan->cfg =
673
AT91_XDMAC_DT_PERID(atchan->perid)
674
| AT_XDMAC_CC_DAM_INCREMENTED_AM
675
| AT_XDMAC_CC_SAM_FIXED_AM
676
| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
677
| AT_XDMAC_CC_DSYNC_PER2MEM
678
| AT_XDMAC_CC_MBSIZE_SIXTEEN
679
| AT_XDMAC_CC_TYPE_PER_TRAN;
680
if (atxdmac->layout->sdif)
681
atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
682
AT_XDMAC_CC_SIF(atchan->perif);
683
684
csize = ffs(atchan->sconfig.src_maxburst) - 1;
685
if (csize < 0) {
686
dev_err(chan2dev(chan), "invalid src maxburst value\n");
687
return -EINVAL;
688
}
689
atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
690
dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
691
if (dwidth < 0) {
692
dev_err(chan2dev(chan), "invalid src addr width value\n");
693
return -EINVAL;
694
}
695
atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
696
} else if (direction == DMA_MEM_TO_DEV) {
697
atchan->cfg =
698
AT91_XDMAC_DT_PERID(atchan->perid)
699
| AT_XDMAC_CC_DAM_FIXED_AM
700
| AT_XDMAC_CC_SAM_INCREMENTED_AM
701
| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
702
| AT_XDMAC_CC_DSYNC_MEM2PER
703
| AT_XDMAC_CC_MBSIZE_SIXTEEN
704
| AT_XDMAC_CC_TYPE_PER_TRAN;
705
if (atxdmac->layout->sdif)
706
atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
707
AT_XDMAC_CC_SIF(atchan->memif);
708
709
csize = ffs(atchan->sconfig.dst_maxburst) - 1;
710
if (csize < 0) {
711
dev_err(chan2dev(chan), "invalid dst maxburst value\n");
712
return -EINVAL;
713
}
714
atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
715
dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
716
if (dwidth < 0) {
717
dev_err(chan2dev(chan), "invalid dst addr width value\n");
718
return -EINVAL;
719
}
720
atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
721
}
722
723
dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
724
725
return 0;
726
}
727
728
/*
729
* Only check that maxburst and addr width values are supported by
730
* the controller, but not that the configuration is suitable for the
731
* transfer since we don't know the direction at this stage.
732
*/
733
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
734
{
735
if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
736
|| (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
737
return -EINVAL;
738
739
if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
740
|| (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
741
return -EINVAL;
742
743
return 0;
744
}
745
746
static int at_xdmac_set_slave_config(struct dma_chan *chan,
747
struct dma_slave_config *sconfig)
748
{
749
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
750
751
if (at_xdmac_check_slave_config(sconfig)) {
752
dev_err(chan2dev(chan), "invalid slave configuration\n");
753
return -EINVAL;
754
}
755
756
memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
757
758
return 0;
759
}
760
761
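/*
 * Prepare a slave (peripheral) scatter-gather transfer: one linked-list
 * descriptor is built per scatterlist entry.
 */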
static struct dma_async_tx_descriptor *
762
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
763
unsigned int sg_len, enum dma_transfer_direction direction,
764
unsigned long flags, void *context)
765
{
766
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
767
struct at_xdmac_desc *first = NULL, *prev = NULL;
768
struct scatterlist *sg;
769
int i;
770
unsigned int xfer_size = 0;
771
unsigned long irqflags;
772
struct dma_async_tx_descriptor *ret = NULL;
773
774
if (!sgl)
775
return NULL;
776
777
if (!is_slave_direction(direction)) {
778
dev_err(chan2dev(chan), "invalid DMA direction\n");
779
return NULL;
780
}
781
782
dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
783
__func__, sg_len,
784
direction == DMA_MEM_TO_DEV ? "to device" : "from device",
785
flags);
786
787
/* Protect the sconfig field, which can be modified by set_slave_config. */
788
spin_lock_irqsave(&atchan->lock, irqflags);
789
790
if (at_xdmac_compute_chan_conf(chan, direction))
791
goto spin_unlock;
792
793
/* Prepare descriptors. */
794
for_each_sg(sgl, sg, sg_len, i) {
795
struct at_xdmac_desc *desc = NULL;
796
u32 len, mem, dwidth, fixed_dwidth;
797
798
len = sg_dma_len(sg);
799
mem = sg_dma_address(sg);
800
if (unlikely(!len)) {
801
dev_err(chan2dev(chan), "sg data length is zero\n");
802
goto spin_unlock;
803
}
804
dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
805
__func__, i, len, mem);
806
807
desc = at_xdmac_get_desc(atchan);
808
if (!desc) {
809
dev_err(chan2dev(chan), "can't get descriptor\n");
810
if (first)
811
list_splice_tail_init(&first->descs_list,
812
&atchan->free_descs_list);
813
goto spin_unlock;
814
}
815
816
/* Linked list descriptor setup. */
817
if (direction == DMA_DEV_TO_MEM) {
818
desc->lld.mbr_sa = atchan->sconfig.src_addr;
819
desc->lld.mbr_da = mem;
820
} else {
821
desc->lld.mbr_sa = mem;
822
desc->lld.mbr_da = atchan->sconfig.dst_addr;
823
}
824
dwidth = at_xdmac_get_dwidth(atchan->cfg);
825
fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
826
? dwidth
827
: AT_XDMAC_CC_DWIDTH_BYTE;
828
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
829
| AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
830
| AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
831
| (len >> fixed_dwidth); /* microblock length */
832
desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
833
AT_XDMAC_CC_DWIDTH(fixed_dwidth);
834
dev_dbg(chan2dev(chan),
835
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
836
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
837
838
/* Chain lld. */
839
if (prev)
840
at_xdmac_queue_desc(chan, prev, desc);
841
842
prev = desc;
843
if (!first)
844
first = desc;
845
846
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
847
__func__, desc, first);
848
list_add_tail(&desc->desc_node, &first->descs_list);
849
xfer_size += len;
850
}
851
852
853
first->tx_dma_desc.flags = flags;
854
first->xfer_size = xfer_size;
855
first->direction = direction;
856
ret = &first->tx_dma_desc;
857
858
spin_unlock:
859
spin_unlock_irqrestore(&atchan->lock, irqflags);
860
return ret;
861
}
862
863
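/*
 * Prepare a cyclic transfer: one descriptor per period, with the last
 * descriptor chained back to the first.
 */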
static struct dma_async_tx_descriptor *
864
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
865
size_t buf_len, size_t period_len,
866
enum dma_transfer_direction direction,
867
unsigned long flags)
868
{
869
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
870
struct at_xdmac_desc *first = NULL, *prev = NULL;
871
unsigned int periods = buf_len / period_len;
872
int i;
873
unsigned long irqflags;
874
875
dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
876
__func__, &buf_addr, buf_len, period_len,
877
direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
878
879
if (!is_slave_direction(direction)) {
880
dev_err(chan2dev(chan), "invalid DMA direction\n");
881
return NULL;
882
}
883
884
if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
885
dev_err(chan2dev(chan), "channel currently used\n");
886
return NULL;
887
}
888
889
if (at_xdmac_compute_chan_conf(chan, direction))
890
return NULL;
891
892
for (i = 0; i < periods; i++) {
893
struct at_xdmac_desc *desc = NULL;
894
895
spin_lock_irqsave(&atchan->lock, irqflags);
896
desc = at_xdmac_get_desc(atchan);
897
if (!desc) {
898
dev_err(chan2dev(chan), "can't get descriptor\n");
899
if (first)
900
list_splice_tail_init(&first->descs_list,
901
&atchan->free_descs_list);
902
spin_unlock_irqrestore(&atchan->lock, irqflags);
903
return NULL;
904
}
905
spin_unlock_irqrestore(&atchan->lock, irqflags);
906
dev_dbg(chan2dev(chan),
907
"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
908
__func__, desc, &desc->tx_dma_desc.phys);
909
910
if (direction == DMA_DEV_TO_MEM) {
911
desc->lld.mbr_sa = atchan->sconfig.src_addr;
912
desc->lld.mbr_da = buf_addr + i * period_len;
913
} else {
914
desc->lld.mbr_sa = buf_addr + i * period_len;
915
desc->lld.mbr_da = atchan->sconfig.dst_addr;
916
}
917
desc->lld.mbr_cfg = atchan->cfg;
918
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
919
| AT_XDMAC_MBR_UBC_NDEN
920
| AT_XDMAC_MBR_UBC_NSEN
921
| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
922
923
dev_dbg(chan2dev(chan),
924
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
925
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
926
927
/* Chain lld. */
928
if (prev)
929
at_xdmac_queue_desc(chan, prev, desc);
930
931
prev = desc;
932
if (!first)
933
first = desc;
934
935
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
936
__func__, desc, first);
937
list_add_tail(&desc->desc_node, &first->descs_list);
938
}
939
940
at_xdmac_queue_desc(chan, prev, first);
941
first->tx_dma_desc.flags = flags;
942
first->xfer_size = buf_len;
943
first->direction = direction;
944
945
return &first->tx_dma_desc;
946
}
947
948
static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
949
{
950
u32 width;
951
952
/*
953
* Check address alignment to select the greater data width we
954
* can use.
955
*
956
* Some XDMAC implementations don't provide dword transfer, in
957
* this case selecting dword has the same behavior as
958
* selecting word transfers.
959
*/
960
if (!(addr & 7)) {
961
width = AT_XDMAC_CC_DWIDTH_DWORD;
962
dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
963
} else if (!(addr & 3)) {
964
width = AT_XDMAC_CC_DWIDTH_WORD;
965
dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
966
} else if (!(addr & 1)) {
967
width = AT_XDMAC_CC_DWIDTH_HALFWORD;
968
dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
969
} else {
970
width = AT_XDMAC_CC_DWIDTH_BYTE;
971
dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
972
}
973
974
return width;
975
}
976
977
static struct at_xdmac_desc *
978
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
979
struct at_xdmac_chan *atchan,
980
struct at_xdmac_desc *prev,
981
dma_addr_t src, dma_addr_t dst,
982
struct dma_interleaved_template *xt,
983
struct data_chunk *chunk)
984
{
985
struct at_xdmac_desc *desc;
986
u32 dwidth;
987
unsigned long flags;
988
size_t ublen;
989
/*
990
* WARNING: The channel configuration is set here since there is no
991
* dmaengine_slave_config call in this case. Moreover we don't know the
992
* direction, which means we can't dynamically set the source and dest
993
* interface so we have to use the same one. Only interface 0 allows EBI
994
* access. Hopefully we can access DDR through both ports (at least on
995
* SAMA5D4x), so we can use the same interface for source and dest,
996
* which works around the fact that we don't know the direction.
997
* ERRATA: Even though it is unused for memory transfers, the PERID must not
998
* match that of another channel, otherwise it could lead to spurious
999
* flag status.
1000
* For SAMA7G5x case, the SIF and DIF fields are no longer used.
1001
* Thus, no need to have the SIF/DIF interfaces here.
1002
* For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
1003
* zero.
1004
*/
1005
u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
1006
| AT_XDMAC_CC_MBSIZE_SIXTEEN
1007
| AT_XDMAC_CC_TYPE_MEM_TRAN;
1008
1009
dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
1010
if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
1011
dev_dbg(chan2dev(chan),
1012
"%s: chunk too big (%zu, max size %lu)...\n",
1013
__func__, chunk->size,
1014
AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
1015
return NULL;
1016
}
1017
1018
if (prev)
1019
dev_dbg(chan2dev(chan),
1020
"Adding items at the end of desc 0x%p\n", prev);
1021
1022
if (xt->src_inc) {
1023
if (xt->src_sgl)
1024
chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
1025
else
1026
chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
1027
}
1028
1029
if (xt->dst_inc) {
1030
if (xt->dst_sgl)
1031
chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
1032
else
1033
chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
1034
}
1035
1036
spin_lock_irqsave(&atchan->lock, flags);
1037
desc = at_xdmac_get_desc(atchan);
1038
spin_unlock_irqrestore(&atchan->lock, flags);
1039
if (!desc) {
1040
dev_err(chan2dev(chan), "can't get descriptor\n");
1041
return NULL;
1042
}
1043
1044
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1045
1046
ublen = chunk->size >> dwidth;
1047
1048
desc->lld.mbr_sa = src;
1049
desc->lld.mbr_da = dst;
1050
desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
1051
desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);
1052
1053
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
1054
| AT_XDMAC_MBR_UBC_NDEN
1055
| AT_XDMAC_MBR_UBC_NSEN
1056
| ublen;
1057
desc->lld.mbr_cfg = chan_cc;
1058
1059
dev_dbg(chan2dev(chan),
1060
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1061
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
1062
desc->lld.mbr_ubc, desc->lld.mbr_cfg);
1063
1064
/* Chain lld. */
1065
if (prev)
1066
at_xdmac_queue_desc(chan, prev, desc);
1067
1068
return desc;
1069
}
1070
1071
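/*
 * Prepare an interleaved memory-to-memory transfer: either repeat a
 * single-chunk frame by bumping its block count, or chain one descriptor
 * per chunk of the frame.
 */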
static struct dma_async_tx_descriptor *
1072
at_xdmac_prep_interleaved(struct dma_chan *chan,
1073
struct dma_interleaved_template *xt,
1074
unsigned long flags)
1075
{
1076
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1077
struct at_xdmac_desc *prev = NULL, *first = NULL;
1078
dma_addr_t dst_addr, src_addr;
1079
size_t src_skip = 0, dst_skip = 0, len = 0;
1080
struct data_chunk *chunk;
1081
int i;
1082
1083
if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
1084
return NULL;
1085
1086
/*
1087
* TODO: Handle the case where we have to repeat a chain of
1088
* descriptors...
1089
*/
1090
if ((xt->numf > 1) && (xt->frame_size > 1))
1091
return NULL;
1092
1093
dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
1094
__func__, &xt->src_start, &xt->dst_start, xt->numf,
1095
xt->frame_size, flags);
1096
1097
src_addr = xt->src_start;
1098
dst_addr = xt->dst_start;
1099
1100
if (xt->numf > 1) {
1101
first = at_xdmac_interleaved_queue_desc(chan, atchan,
1102
NULL,
1103
src_addr, dst_addr,
1104
xt, xt->sgl);
1105
if (!first)
1106
return NULL;
1107
1108
/* Length of the block is (BLEN+1) microblocks. */
1109
for (i = 0; i < xt->numf - 1; i++)
1110
at_xdmac_increment_block_count(chan, first);
1111
1112
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1113
__func__, first, first);
1114
list_add_tail(&first->desc_node, &first->descs_list);
1115
} else {
1116
for (i = 0; i < xt->frame_size; i++) {
1117
size_t src_icg = 0, dst_icg = 0;
1118
struct at_xdmac_desc *desc;
1119
1120
chunk = xt->sgl + i;
1121
1122
dst_icg = dmaengine_get_dst_icg(xt, chunk);
1123
src_icg = dmaengine_get_src_icg(xt, chunk);
1124
1125
src_skip = chunk->size + src_icg;
1126
dst_skip = chunk->size + dst_icg;
1127
1128
dev_dbg(chan2dev(chan),
1129
"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
1130
__func__, chunk->size, src_icg, dst_icg);
1131
1132
desc = at_xdmac_interleaved_queue_desc(chan, atchan,
1133
prev,
1134
src_addr, dst_addr,
1135
xt, chunk);
1136
if (!desc) {
1137
if (first)
1138
list_splice_tail_init(&first->descs_list,
1139
&atchan->free_descs_list);
1140
return NULL;
1141
}
1142
1143
if (!first)
1144
first = desc;
1145
1146
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1147
__func__, desc, first);
1148
list_add_tail(&desc->desc_node, &first->descs_list);
1149
1150
if (xt->src_sgl)
1151
src_addr += src_skip;
1152
1153
if (xt->dst_sgl)
1154
dst_addr += dst_skip;
1155
1156
len += chunk->size;
1157
prev = desc;
1158
}
1159
}
1160
1161
first->tx_dma_desc.cookie = -EBUSY;
1162
first->tx_dma_desc.flags = flags;
1163
first->xfer_size = len;
1164
1165
return &first->tx_dma_desc;
1166
}
1167
1168
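/*
 * Prepare a memory-to-memory copy, split into chained descriptors whose
 * microblock length fits in AT_XDMAC_MBR_UBC_UBLEN_MAX.
 */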
static struct dma_async_tx_descriptor *
1169
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1170
size_t len, unsigned long flags)
1171
{
1172
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1173
struct at_xdmac_desc *first = NULL, *prev = NULL;
1174
size_t remaining_size = len, xfer_size = 0, ublen;
1175
dma_addr_t src_addr = src, dst_addr = dest;
1176
u32 dwidth;
1177
/*
1178
* WARNING: We don't know the direction, which means we can't
1179
* dynamically set the source and dest interface so we have to use the
1180
* same one. Only interface 0 allows EBI access. Hopefully we can
1181
* access DDR through both ports (at least on SAMA5D4x), so we can use
1182
* the same interface for source and dest, which works around the fact we
1183
* don't know the direction.
1184
* ERRATA: Even though it is unused for memory transfers, the PERID must not
1185
* match that of another channel, otherwise it could lead to spurious
1186
* flag status.
1187
* For SAMA7G5x case, the SIF and DIF fields are no longer used.
1188
* Thus, no need to have the SIF/DIF interfaces here.
1189
* For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
1190
* zero.
1191
*/
1192
u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
1193
| AT_XDMAC_CC_DAM_INCREMENTED_AM
1194
| AT_XDMAC_CC_SAM_INCREMENTED_AM
1195
| AT_XDMAC_CC_MBSIZE_SIXTEEN
1196
| AT_XDMAC_CC_TYPE_MEM_TRAN;
1197
unsigned long irqflags;
1198
1199
dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
1200
__func__, &src, &dest, len, flags);
1201
1202
if (unlikely(!len))
1203
return NULL;
1204
1205
dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);
1206
1207
/* Prepare descriptors. */
1208
while (remaining_size) {
1209
struct at_xdmac_desc *desc = NULL;
1210
1211
dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
1212
1213
spin_lock_irqsave(&atchan->lock, irqflags);
1214
desc = at_xdmac_get_desc(atchan);
1215
spin_unlock_irqrestore(&atchan->lock, irqflags);
1216
if (!desc) {
1217
dev_err(chan2dev(chan), "can't get descriptor\n");
1218
if (first)
1219
list_splice_tail_init(&first->descs_list,
1220
&atchan->free_descs_list);
1221
return NULL;
1222
}
1223
1224
/* Update src and dest addresses. */
1225
src_addr += xfer_size;
1226
dst_addr += xfer_size;
1227
1228
if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
1229
xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
1230
else
1231
xfer_size = remaining_size;
1232
1233
dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
1234
1235
/* Check remaining length and change data width if needed. */
1236
dwidth = at_xdmac_align_width(chan,
1237
src_addr | dst_addr | xfer_size);
1238
chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
1239
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1240
1241
ublen = xfer_size >> dwidth;
1242
remaining_size -= xfer_size;
1243
1244
desc->lld.mbr_sa = src_addr;
1245
desc->lld.mbr_da = dst_addr;
1246
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
1247
| AT_XDMAC_MBR_UBC_NDEN
1248
| AT_XDMAC_MBR_UBC_NSEN
1249
| ublen;
1250
desc->lld.mbr_cfg = chan_cc;
1251
1252
dev_dbg(chan2dev(chan),
1253
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1254
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
1255
1256
/* Chain lld. */
1257
if (prev)
1258
at_xdmac_queue_desc(chan, prev, desc);
1259
1260
prev = desc;
1261
if (!first)
1262
first = desc;
1263
1264
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1265
__func__, desc, first);
1266
list_add_tail(&desc->desc_node, &first->descs_list);
1267
}
1268
1269
first->tx_dma_desc.flags = flags;
1270
first->xfer_size = len;
1271
1272
return &first->tx_dma_desc;
1273
}
1274
1275
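/*
 * Build a single memset descriptor that fills len bytes at dst_addr with
 * the pattern replicated from the low byte of value.
 */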
static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
1276
struct at_xdmac_chan *atchan,
1277
dma_addr_t dst_addr,
1278
size_t len,
1279
int value)
1280
{
1281
struct at_xdmac_desc *desc;
1282
unsigned long flags;
1283
size_t ublen;
1284
u32 dwidth;
1285
char pattern;
1286
/*
1287
* WARNING: The channel configuration is set here since there is no
1288
* dmaengine_slave_config call in this case. Moreover we don't know the
1289
* direction, which means we can't dynamically set the source and dest
1290
* interface so we have to use the same one. Only interface 0 allows EBI
1291
* access. Hopefully we can access DDR through both ports (at least on
1292
* SAMA5D4x), so we can use the same interface for source and dest,
1293
* which works around the fact that we don't know the direction.
1294
* ERRATA: Even though it is unused for memory transfers, the PERID must not
1295
* match that of another channel, otherwise it could lead to spurious
1296
* flag status.
1297
* For SAMA7G5x case, the SIF and DIF fields are no longer used.
1298
* Thus, no need to have the SIF/DIF interfaces here.
1299
* For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
1300
* zero.
1301
*/
1302
u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
1303
| AT_XDMAC_CC_DAM_UBS_AM
1304
| AT_XDMAC_CC_SAM_INCREMENTED_AM
1305
| AT_XDMAC_CC_MBSIZE_SIXTEEN
1306
| AT_XDMAC_CC_MEMSET_HW_MODE
1307
| AT_XDMAC_CC_TYPE_MEM_TRAN;
1308
1309
dwidth = at_xdmac_align_width(chan, dst_addr);
1310
1311
if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
1312
dev_err(chan2dev(chan),
1313
"%s: Transfer too large, aborting...\n",
1314
__func__);
1315
return NULL;
1316
}
1317
1318
spin_lock_irqsave(&atchan->lock, flags);
1319
desc = at_xdmac_get_desc(atchan);
1320
spin_unlock_irqrestore(&atchan->lock, flags);
1321
if (!desc) {
1322
dev_err(chan2dev(chan), "can't get descriptor\n");
1323
return NULL;
1324
}
1325
1326
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1327
1328
/* Only the first byte of value is to be used according to dmaengine */
1329
pattern = (char)value;
1330
1331
ublen = len >> dwidth;
1332
1333
desc->lld.mbr_da = dst_addr;
1334
desc->lld.mbr_ds = (pattern << 24) |
1335
(pattern << 16) |
1336
(pattern << 8) |
1337
pattern;
1338
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
1339
| AT_XDMAC_MBR_UBC_NDEN
1340
| AT_XDMAC_MBR_UBC_NSEN
1341
| ublen;
1342
desc->lld.mbr_cfg = chan_cc;
1343
1344
dev_dbg(chan2dev(chan),
1345
"%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1346
__func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
1347
desc->lld.mbr_cfg);
1348
1349
return desc;
1350
}
1351
1352
static struct dma_async_tx_descriptor *
1353
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
1354
size_t len, unsigned long flags)
1355
{
1356
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1357
struct at_xdmac_desc *desc;
1358
1359
dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
1360
__func__, &dest, len, value, flags);
1361
1362
if (unlikely(!len))
1363
return NULL;
1364
1365
desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
1366
if (!desc)
1367
return NULL;
1368
list_add_tail(&desc->desc_node, &desc->descs_list);
1369
1370
desc->tx_dma_desc.cookie = -EBUSY;
1371
desc->tx_dma_desc.flags = flags;
1372
desc->xfer_size = len;
1373
1374
return &desc->tx_dma_desc;
1375
}
1376
1377
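/*
 * Prepare a scatter-gather memset: adjacent elements with the same length
 * and stride are merged into one descriptor by incrementing its block
 * count.
 */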
static struct dma_async_tx_descriptor *
1378
at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1379
unsigned int sg_len, int value,
1380
unsigned long flags)
1381
{
1382
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1383
struct at_xdmac_desc *desc, *pdesc = NULL,
1384
*ppdesc = NULL, *first = NULL;
1385
struct scatterlist *sg, *psg = NULL, *ppsg = NULL;
1386
size_t stride = 0, pstride = 0, len = 0;
1387
int i;
1388
1389
if (!sgl)
1390
return NULL;
1391
1392
dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
1393
__func__, sg_len, value, flags);
1394
1395
/* Prepare descriptors. */
1396
for_each_sg(sgl, sg, sg_len, i) {
1397
dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1398
__func__, &sg_dma_address(sg), sg_dma_len(sg),
1399
value, flags);
1400
desc = at_xdmac_memset_create_desc(chan, atchan,
1401
sg_dma_address(sg),
1402
sg_dma_len(sg),
1403
value);
1404
if (!desc && first)
1405
list_splice_tail_init(&first->descs_list,
1406
&atchan->free_descs_list);
1407
1408
if (!first)
1409
first = desc;
1410
1411
/* Update our strides */
1412
pstride = stride;
1413
if (psg)
1414
stride = sg_dma_address(sg) -
1415
(sg_dma_address(psg) + sg_dma_len(psg));
1416
1417
/*
1418
* The scatterlist API gives us only the address and
1419
* length of each element.
1420
*
1421
* Unfortunately, we don't have the stride, which we
1422
* will need to compute.
1423
*
1424
* That makes us end up in a situation like this one:
1425
* len stride len stride len
1426
* +-------+ +-------+ +-------+
1427
* | N-2 | | N-1 | | N |
1428
* +-------+ +-------+ +-------+
1429
*
1430
* We need all these three elements (N-2, N-1 and N)
1431
* to actually take the decision on whether we need to
1432
* queue N-1 or reuse N-2.
1433
*
1434
* We will only consider N if it is the last element.
1435
*/
1436
if (ppdesc && pdesc) {
1437
if ((stride == pstride) &&
1438
(sg_dma_len(ppsg) == sg_dma_len(psg))) {
1439
dev_dbg(chan2dev(chan),
1440
"%s: desc 0x%p can be merged with desc 0x%p\n",
1441
__func__, pdesc, ppdesc);
1442
1443
/*
1444
* Increment the block count of the
1445
* N-2 descriptor
1446
*/
1447
at_xdmac_increment_block_count(chan, ppdesc);
1448
ppdesc->lld.mbr_dus = stride;
1449
1450
/*
1451
* Put back the N-1 descriptor in the
1452
* free descriptor list
1453
*/
1454
list_add_tail(&pdesc->desc_node,
1455
&atchan->free_descs_list);
1456
1457
/*
1458
* Make our N-1 descriptor pointer
1459
* point to the N-2 since they were
1460
* actually merged.
1461
*/
1462
pdesc = ppdesc;
1463
1464
/*
1465
* Rule out the case where we don't have
1466
* pstride computed yet (our second sg
1467
* element)
1468
*
1469
* We also want to catch the case where there
1470
* would be a negative stride.
1471
*/
1472
} else if (pstride ||
1473
sg_dma_address(sg) < sg_dma_address(psg)) {
1474
/*
1475
* Queue the N-1 descriptor after the
1476
* N-2
1477
*/
1478
at_xdmac_queue_desc(chan, ppdesc, pdesc);
1479
1480
/*
1481
* Add the N-1 descriptor to the list
1482
* of the descriptors used for this
1483
* transfer
1484
*/
1485
list_add_tail(&desc->desc_node,
1486
&first->descs_list);
1487
dev_dbg(chan2dev(chan),
1488
"%s: add desc 0x%p to descs_list 0x%p\n",
1489
__func__, desc, first);
1490
}
1491
}
1492
1493
/*
1494
* If we are the last element, just see if we have the
1495
* same size as the previous element.
1496
*
1497
* If so, we can merge it with the previous descriptor
1498
* since we don't care about the stride anymore.
1499
*/
1500
if ((i == (sg_len - 1)) &&
1501
sg_dma_len(psg) == sg_dma_len(sg)) {
1502
dev_dbg(chan2dev(chan),
1503
"%s: desc 0x%p can be merged with desc 0x%p\n",
1504
__func__, desc, pdesc);
1505
1506
/*
1507
* Increment the block count of the N-1
1508
* descriptor
1509
*/
1510
at_xdmac_increment_block_count(chan, pdesc);
1511
pdesc->lld.mbr_dus = stride;
1512
1513
/*
1514
* Put back the N descriptor in the free
1515
* descriptor list
1516
*/
1517
list_add_tail(&desc->desc_node,
1518
&atchan->free_descs_list);
1519
}
1520
1521
/* Update our descriptors */
1522
ppdesc = pdesc;
1523
pdesc = desc;
1524
1525
/* Update our scatter pointers */
1526
ppsg = psg;
1527
psg = sg;
1528
1529
len += sg_dma_len(sg);
1530
}
1531
1532
first->tx_dma_desc.cookie = -EBUSY;
1533
first->tx_dma_desc.flags = flags;
1534
first->xfer_size = len;
1535
1536
return &first->tx_dma_desc;
1537
}
1538
1539
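/*
 * Report the transaction status and, for an in-flight transfer, compute
 * the residue by walking the descriptor chain against the current
 * CNDA/CUBC values.
 */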
static enum dma_status
1540
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1541
struct dma_tx_state *txstate)
1542
{
1543
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1544
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1545
struct at_xdmac_desc *desc, *_desc, *iter;
1546
struct list_head *descs_list;
1547
enum dma_status ret;
1548
int residue, retry, pm_status;
1549
u32 cur_nda, check_nda, cur_ubc, mask, value;
1550
u8 dwidth = 0;
1551
unsigned long flags;
1552
bool initd;
1553
1554
ret = dma_cookie_status(chan, cookie, txstate);
1555
if (ret == DMA_COMPLETE || !txstate)
1556
return ret;
1557
1558
pm_status = pm_runtime_resume_and_get(atxdmac->dev);
1559
if (pm_status < 0)
1560
return DMA_ERROR;
1561
1562
spin_lock_irqsave(&atchan->lock, flags);
1563
1564
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
1565
1566
/*
1567
* If the transfer has not been started yet, there is no need to compute
1568
* the residue; it is simply the transfer length.
1569
*/
1570
if (!desc->active_xfer) {
1571
dma_set_residue(txstate, desc->xfer_size);
1572
goto spin_unlock;
1573
}
1574
1575
residue = desc->xfer_size;
1576
/*
1577
* Flush FIFO: only relevant when the transfer is source peripheral
1578
* synchronized. Flush is needed before reading CUBC because data in
1579
* the FIFO are not reported by CUBC. Reporting a residue of the
1580
* transfer length while we have data in the FIFO can cause issues.
1581
* Use case: the Atmel USART has a timeout, which means characters have
1582
* been received but no new character has arrived for a while. On
1583
* timeout, it requests the residue. If the data are in the DMA FIFO,
1584
* we will return a residue of the transfer length, meaning no data was
1585
* received. If an application is waiting for these data, it will hang
1586
* since we won't have another USART timeout without receiving new
1587
* data.
1588
*/
1589
mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
1590
value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
1591
if ((desc->lld.mbr_cfg & mask) == value) {
1592
at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1593
while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1594
cpu_relax();
1595
}
1596
1597
/*
1598
* The easiest way to compute the residue would be to pause the DMA,
1599
* but doing this can lead to missing some data as some devices don't
1600
* have a FIFO.
1601
* We need to read several registers because:
1602
* - DMA is running therefore a descriptor change is possible while
1603
* reading these registers
1604
* - When the block transfer is done, the value of the CUBC register
1605
* is set to its initial value until the fetch of the next descriptor.
1606
* This value will corrupt the residue calculation so we have to skip
1607
* it.
1608
*
1609
* INITD -------- ------------
1610
* |____________________|
1611
* _______________________ _______________
1612
* NDA @desc2 \/ @desc3
1613
* _______________________/\_______________
1614
* __________ ___________ _______________
1615
* CUBC 0 \/ MAX desc1 \/ MAX desc2
1616
* __________/\___________/\_______________
1617
*
1618
* Since descriptors are aligned on 64 bits, we can assume that
1619
* the update of NDA and CUBC is atomic.
1620
* Memory barriers are used to ensure the read order of the registers.
1621
* A maximum number of retries is set because, although unlikely, this
* loop could otherwise never end.
1622
*/
1623
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1624
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1625
rmb();
1626
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1627
rmb();
1628
initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1629
rmb();
1630
cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1631
rmb();
1632
1633
if ((check_nda == cur_nda) && initd)
1634
break;
1635
}
1636
1637
if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
1638
ret = DMA_ERROR;
1639
goto spin_unlock;
1640
}
1641
1642
/*
1643
* Flush FIFO: only relevant when the transfer is source peripheral
1644
* synchronized. Another flush is needed here because CUBC is updated
1645
* when the controller sends the data write command. It can lead to
1646
* report data that are not written in the memory or the device. The
1647
* FIFO flush ensures that data are really written.
1648
*/
1649
if ((desc->lld.mbr_cfg & mask) == value) {
1650
at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1651
while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1652
cpu_relax();
1653
}
1654
1655
/*
1656
* Subtract the size of all microblocks already transferred, including
1657
* the current one, then add back the remaining size of the current
1658
* microblock.
1659
*/
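/*
 * Worked example (hypothetical values): a transfer of three microblocks
 * of 0x100 data units each, with dwidth = 2 (4-byte words), has
 * xfer_size = 3 * 0x100 * 4 = 0xC00 bytes. If the controller is in the
 * second microblock with CUBC = 0x40 units left, the loop below
 * subtracts 0x400 + 0x400 and we add back 0x40 << 2, giving a residue
 * of 0x500 bytes (the rest of microblock 2 plus all of microblock 3).
 */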
1660
descs_list = &desc->descs_list;
1661
list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
1662
dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
1663
residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
1664
if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
1665
desc = iter;
1666
break;
1667
}
1668
}
1669
residue += cur_ubc << dwidth;
1670
1671
dma_set_residue(txstate, residue);
1672
1673
dev_dbg(chan2dev(chan),
1674
"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
1675
__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
1676
1677
spin_unlock:
1678
spin_unlock_irqrestore(&atchan->lock, flags);
1679
pm_runtime_mark_last_busy(atxdmac->dev);
1680
pm_runtime_put_autosuspend(atxdmac->dev);
1681
return ret;
1682
}
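/*
 * Consumer-side sketch (not part of this driver): the residue computed
 * above is retrieved through the generic dmaengine helpers, e.g.:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
 *		pr_debug("%u bytes remaining\n", state.residue);
 */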
1683
1684
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
1685
{
1686
struct at_xdmac_desc *desc;
1687
1688
/*
1689
* If the channel is enabled, do nothing; advance_work will be
1690
* triggered after the interrupt.
1691
*/
1692
if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
1693
return;
1694
1695
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1696
xfer_node);
1697
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1698
if (!desc->active_xfer)
1699
at_xdmac_start_xfer(atchan, desc);
1700
}
1701
1702
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
1703
{
1704
struct at_xdmac_desc *desc;
1705
struct dma_async_tx_descriptor *txd;
1706
1707
spin_lock_irq(&atchan->lock);
1708
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1709
__func__, atchan->irq_status);
1710
if (list_empty(&atchan->xfers_list)) {
1711
spin_unlock_irq(&atchan->lock);
1712
return;
1713
}
1714
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1715
xfer_node);
1716
spin_unlock_irq(&atchan->lock);
1717
txd = &desc->tx_dma_desc;
1718
if (txd->flags & DMA_PREP_INTERRUPT)
1719
dmaengine_desc_get_callback_invoke(txd, NULL);
1720
}
1721
1722
/* Called with atchan->lock held. */
1723
static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
1724
{
1725
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1726
struct at_xdmac_desc *bad_desc;
1727
int ret;
1728
1729
ret = pm_runtime_resume_and_get(atxdmac->dev);
1730
if (ret < 0)
1731
return;
1732
1733
/*
1734
* The descriptor currently at the head of the active list is
1735
* broken. Since we don't have any way to report errors, we'll
1736
* just have to scream loudly and try to continue with other
1737
* descriptors queued (if any).
1738
*/
1739
if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
1740
dev_err(chan2dev(&atchan->chan), "read bus error!!!");
1741
if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
1742
dev_err(chan2dev(&atchan->chan), "write bus error!!!");
1743
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
1744
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
1745
1746
/* Channel must be disabled first as it's not done automatically */
1747
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1748
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1749
cpu_relax();
1750
1751
bad_desc = list_first_entry(&atchan->xfers_list,
1752
struct at_xdmac_desc,
1753
xfer_node);
1754
1755
/* Print bad descriptor's details if needed */
1756
dev_dbg(chan2dev(&atchan->chan),
1757
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
1758
__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
1759
bad_desc->lld.mbr_ubc);
1760
1761
pm_runtime_mark_last_busy(atxdmac->dev);
1762
pm_runtime_put_autosuspend(atxdmac->dev);
1763
1764
/* Then continue with usual descriptor management */
1765
}
1766
1767
static void at_xdmac_tasklet(struct tasklet_struct *t)
1768
{
1769
struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
1770
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1771
struct at_xdmac_desc *desc;
1772
struct dma_async_tx_descriptor *txd;
1773
u32 error_mask;
1774
1775
if (at_xdmac_chan_is_cyclic(atchan))
1776
return at_xdmac_handle_cyclic(atchan);
1777
1778
error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
1779
AT_XDMAC_CIS_ROIS;
1780
1781
spin_lock_irq(&atchan->lock);
1782
1783
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1784
__func__, atchan->irq_status);
1785
1786
if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
1787
!(atchan->irq_status & error_mask)) {
1788
spin_unlock_irq(&atchan->lock);
1789
return;
1790
}
1791
1792
if (atchan->irq_status & error_mask)
1793
at_xdmac_handle_error(atchan);
1794
1795
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1796
xfer_node);
1797
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1798
if (!desc->active_xfer) {
1799
dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
1800
spin_unlock_irq(&atchan->lock);
1801
return;
1802
}
1803
1804
txd = &desc->tx_dma_desc;
1805
dma_cookie_complete(txd);
1806
/* Remove the transfer from the transfer list. */
1807
list_del(&desc->xfer_node);
1808
spin_unlock_irq(&atchan->lock);
1809
1810
if (txd->flags & DMA_PREP_INTERRUPT)
1811
dmaengine_desc_get_callback_invoke(txd, NULL);
1812
1813
dma_run_dependencies(txd);
1814
1815
spin_lock_irq(&atchan->lock);
1816
/* Move the xfer descriptors into the free descriptors list. */
1817
list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
1818
at_xdmac_advance_work(atchan);
1819
spin_unlock_irq(&atchan->lock);
1820
1821
/*
1822
* Decrement runtime PM ref counter incremented in
1823
* at_xdmac_start_xfer().
1824
*/
1825
pm_runtime_mark_last_busy(atxdmac->dev);
1826
pm_runtime_put_autosuspend(atxdmac->dev);
1827
}
1828
1829
static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1830
{
1831
struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
1832
struct at_xdmac_chan *atchan;
1833
u32 imr, status, pending;
1834
u32 chan_imr, chan_status;
1835
int i, ret = IRQ_NONE;
1836
1837
do {
1838
imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1839
status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
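/* Keep only the interrupt sources that are currently unmasked. */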
1840
pending = status & imr;
1841
1842
dev_vdbg(atxdmac->dma.dev,
1843
"%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
1844
__func__, status, imr, pending);
1845
1846
if (!pending)
1847
break;
1848
1849
/* We have to find which channel has generated the interrupt. */
1850
for (i = 0; i < atxdmac->dma.chancnt; i++) {
1851
if (!((1 << i) & pending))
1852
continue;
1853
1854
atchan = &atxdmac->chan[i];
1855
chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1856
chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1857
atchan->irq_status = chan_status & chan_imr;
1858
dev_vdbg(atxdmac->dma.dev,
1859
"%s: chan%d: imr=0x%x, status=0x%x\n",
1860
__func__, i, chan_imr, chan_status);
1861
dev_vdbg(chan2dev(&atchan->chan),
1862
"%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
1863
__func__,
1864
at_xdmac_chan_read(atchan, AT_XDMAC_CC),
1865
at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
1866
at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
1867
at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
1868
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1869
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
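/*
 * On a read/write bus error, stop the channel right away; the
 * tasklet scheduled below will report the error and clean up.
 */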
1870
1871
if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1872
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1873
1874
tasklet_schedule(&atchan->tasklet);
1875
ret = IRQ_HANDLED;
1876
}
1877
1878
} while (pending);
1879
1880
return ret;
1881
}
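/*
 * Entry point for dma_async_issue_pending(): kick the first queued
 * descriptor if the channel is currently idle.
 */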
1882
1883
static void at_xdmac_issue_pending(struct dma_chan *chan)
1884
{
1885
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1886
unsigned long flags;
1887
1888
dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
1889
1890
spin_lock_irqsave(&atchan->lock, flags);
1891
at_xdmac_advance_work(atchan);
1892
spin_unlock_irqrestore(&atchan->lock, flags);
1893
1894
return;
1895
}
1896
1897
static int at_xdmac_device_config(struct dma_chan *chan,
1898
struct dma_slave_config *config)
1899
{
1900
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1901
int ret;
1902
unsigned long flags;
1903
1904
dev_dbg(chan2dev(chan), "%s\n", __func__);
1905
1906
spin_lock_irqsave(&atchan->lock, flags);
1907
ret = at_xdmac_set_slave_config(chan, config);
1908
spin_unlock_irqrestore(&atchan->lock, flags);
1909
1910
return ret;
1911
}
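/*
 * Consumer-side sketch (values are examples only): peripheral drivers
 * normally reach the callback above through dmaengine_slave_config():
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = rx_fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.src_maxburst = 1,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */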
1912
1913
static void at_xdmac_device_pause_set(struct at_xdmac *atxdmac,
1914
struct at_xdmac_chan *atchan)
1915
{
1916
at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
1917
while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) &
1918
(AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
1919
cpu_relax();
1920
}
1921
1922
static void at_xdmac_device_pause_internal(struct at_xdmac_chan *atchan)
1923
{
1924
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1925
unsigned long flags;
1926
1927
spin_lock_irqsave(&atchan->lock, flags);
1928
set_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
1929
at_xdmac_device_pause_set(atxdmac, atchan);
1930
spin_unlock_irqrestore(&atchan->lock, flags);
1931
}
1932
1933
static int at_xdmac_device_pause(struct dma_chan *chan)
1934
{
1935
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1936
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1937
unsigned long flags;
1938
int ret;
1939
1940
dev_dbg(chan2dev(chan), "%s\n", __func__);
1941
1942
if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
1943
return 0;
1944
1945
ret = pm_runtime_resume_and_get(atxdmac->dev);
1946
if (ret < 0)
1947
return ret;
1948
1949
spin_lock_irqsave(&atchan->lock, flags);
1950
1951
at_xdmac_device_pause_set(atxdmac, atchan);
1952
/* Decrement runtime PM ref counter for each active descriptor. */
1953
at_xdmac_runtime_suspend_descriptors(atchan);
1954
1955
spin_unlock_irqrestore(&atchan->lock, flags);
1956
1957
pm_runtime_mark_last_busy(atxdmac->dev);
1958
pm_runtime_put_autosuspend(atxdmac->dev);
1959
1960
return 0;
1961
}
1962
1963
static void at_xdmac_device_resume_internal(struct at_xdmac_chan *atchan)
1964
{
1965
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1966
unsigned long flags;
1967
1968
spin_lock_irqsave(&atchan->lock, flags);
1969
at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
1970
clear_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
1971
spin_unlock_irqrestore(&atchan->lock, flags);
1972
}
1973
1974
static int at_xdmac_device_resume(struct dma_chan *chan)
1975
{
1976
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1977
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1978
unsigned long flags;
1979
int ret;
1980
1981
dev_dbg(chan2dev(chan), "%s\n", __func__);
1982
1983
ret = pm_runtime_resume_and_get(atxdmac->dev);
1984
if (ret < 0)
1985
return ret;
1986
1987
spin_lock_irqsave(&atchan->lock, flags);
1988
if (!at_xdmac_chan_is_paused(atchan))
1989
goto unlock;
1990
1991
/* Increment runtime PM ref counter for each active descriptor. */
1992
ret = at_xdmac_runtime_resume_descriptors(atchan);
1993
if (ret < 0)
1994
goto unlock;
1995
1996
at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
1997
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1998
1999
unlock:
2000
spin_unlock_irqrestore(&atchan->lock, flags);
2001
pm_runtime_mark_last_busy(atxdmac->dev);
2002
pm_runtime_put_autosuspend(atxdmac->dev);
2003
2004
return ret;
2005
}
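/*
 * Note: the pause/resume callbacks above are reached through the
 * generic dmaengine_pause() and dmaengine_resume() helpers.
 */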
2006
2007
static int at_xdmac_device_terminate_all(struct dma_chan *chan)
2008
{
2009
struct at_xdmac_desc *desc, *_desc;
2010
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2011
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
2012
unsigned long flags;
2013
int ret;
2014
2015
dev_dbg(chan2dev(chan), "%s\n", __func__);
2016
2017
ret = pm_runtime_resume_and_get(atxdmac->dev);
2018
if (ret < 0)
2019
return ret;
2020
2021
spin_lock_irqsave(&atchan->lock, flags);
2022
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
2023
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
2024
cpu_relax();
2025
2026
/* Cancel all pending transfers. */
2027
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
2028
list_del(&desc->xfer_node);
2029
list_splice_tail_init(&desc->descs_list,
2030
&atchan->free_descs_list);
2031
/*
2032
* We incremented the runtime PM reference count in
2033
* at_xdmac_start_xfer() for this descriptor. Now it's time
2034
* to release it.
2035
*/
2036
if (desc->active_xfer)
2037
pm_runtime_put_noidle(atxdmac->dev);
2038
}
2039
2040
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
2041
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
2042
spin_unlock_irqrestore(&atchan->lock, flags);
2043
2044
pm_runtime_mark_last_busy(atxdmac->dev);
2045
pm_runtime_put_autosuspend(atxdmac->dev);
2046
2047
return 0;
2048
}
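/*
 * Note: consumers reach the terminate_all callback above through
 * dmaengine_terminate_sync() or dmaengine_terminate_async().
 */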
2049
2050
static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
2051
{
2052
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2053
struct at_xdmac_desc *desc;
2054
int i;
2055
2056
if (at_xdmac_chan_is_enabled(atchan)) {
2057
dev_err(chan2dev(chan),
2058
"can't allocate channel resources (channel enabled)\n");
2059
return -EIO;
2060
}
2061
2062
if (!list_empty(&atchan->free_descs_list)) {
2063
dev_err(chan2dev(chan),
2064
"can't allocate channel resources (channel not free from a previous use)\n");
2065
return -EIO;
2066
}
2067
2068
for (i = 0; i < init_nr_desc_per_channel; i++) {
2069
desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
2070
if (!desc) {
2071
if (i == 0) {
2072
dev_warn(chan2dev(chan),
2073
"can't allocate any descriptors\n");
2074
return -EIO;
2075
}
2076
dev_warn(chan2dev(chan),
2077
"only %d descriptors have been allocated\n", i);
2078
break;
2079
}
2080
list_add_tail(&desc->desc_node, &atchan->free_descs_list);
2081
}
2082
2083
dma_cookie_init(chan);
2084
2085
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
2086
2087
return i;
2088
}
2089
2090
static void at_xdmac_free_chan_resources(struct dma_chan *chan)
2091
{
2092
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2093
struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
2094
struct at_xdmac_desc *desc, *_desc;
2095
2096
list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
2097
dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
2098
list_del(&desc->desc_node);
2099
dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
2100
}
2101
2102
return;
2103
}
2104
2105
static void at_xdmac_axi_config(struct platform_device *pdev)
2106
{
2107
struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2108
bool dev_m2m = false;
2109
u32 dma_requests;
2110
2111
if (!atxdmac->layout->axi_config)
2112
return; /* Not supported */
2113
2114
if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
2115
&dma_requests)) {
2116
dev_info(&pdev->dev, "controller in mem2mem mode.\n");
2117
dev_m2m = true;
2118
}
2119
2120
if (dev_m2m) {
2121
at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
2122
at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
2123
} else {
2124
at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
2125
at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
2126
}
2127
}
2128
2129
static int __maybe_unused atmel_xdmac_prepare(struct device *dev)
2130
{
2131
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2132
struct dma_chan *chan, *_chan;
2133
2134
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
2135
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2136
2137
/* Wait for transfer completion, except in cyclic case. */
2138
if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
2139
return -EAGAIN;
2140
}
2141
return 0;
2142
}
2143
2144
static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
2145
{
2146
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2147
struct dma_chan *chan, *_chan;
2148
int ret;
2149
2150
ret = pm_runtime_resume_and_get(atxdmac->dev);
2151
if (ret < 0)
2152
return ret;
2153
2154
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
2155
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2156
2157
atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
2158
if (at_xdmac_chan_is_cyclic(atchan)) {
2159
if (!at_xdmac_chan_is_paused(atchan)) {
2160
dev_warn(chan2dev(chan), "%s: channel %d not paused\n",
2161
__func__, chan->chan_id);
2162
at_xdmac_device_pause_internal(atchan);
2163
at_xdmac_runtime_suspend_descriptors(atchan);
2164
}
2165
atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
2166
atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
2167
atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
2168
}
2169
}
2170
atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
2171
atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
2172
2173
at_xdmac_off(atxdmac, false);
2174
pm_runtime_mark_last_busy(atxdmac->dev);
2175
pm_runtime_put_noidle(atxdmac->dev);
2176
clk_disable_unprepare(atxdmac->clk);
2177
2178
return 0;
2179
}
2180
2181
static int __maybe_unused atmel_xdmac_resume(struct device *dev)
2182
{
2183
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2184
struct at_xdmac_chan *atchan;
2185
struct dma_chan *chan, *_chan;
2186
struct platform_device *pdev = container_of(dev, struct platform_device, dev);
2187
int i, ret;
2188
2189
ret = clk_prepare_enable(atxdmac->clk);
2190
if (ret)
2191
return ret;
2192
2193
pm_runtime_get_noresume(atxdmac->dev);
2194
2195
at_xdmac_axi_config(pdev);
2196
2197
/* Clear pending interrupts. */
2198
for (i = 0; i < atxdmac->dma.chancnt; i++) {
2199
atchan = &atxdmac->chan[i];
2200
while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2201
cpu_relax();
2202
}
2203
2204
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
2205
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
2206
atchan = to_at_xdmac_chan(chan);
2207
2208
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
2209
if (at_xdmac_chan_is_cyclic(atchan)) {
2210
/*
2211
* Resume only channels not explicitly paused by
2212
* consumers.
2213
*/
2214
if (at_xdmac_chan_is_paused_internal(atchan)) {
2215
ret = at_xdmac_runtime_resume_descriptors(atchan);
2216
if (ret < 0)
2217
return ret;
2218
at_xdmac_device_resume_internal(atchan);
2219
}
2220
2221
/*
2222
* We may resume from a deep sleep state where power
2223
* to the DMA controller is cut off. Thus, restore the
2224
* paused state of channels that was set through the dmaengine API.
2225
*/
2226
else if (at_xdmac_chan_is_paused(atchan))
2227
at_xdmac_device_pause_set(atxdmac, atchan);
2228
2229
at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
2230
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
2231
at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
2232
wmb();
2233
if (atxdmac->save_gs & atchan->mask)
2234
at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
2235
}
2236
}
2237
2238
pm_runtime_mark_last_busy(atxdmac->dev);
2239
pm_runtime_put_autosuspend(atxdmac->dev);
2240
2241
return 0;
2242
}
2243
2244
static int __maybe_unused atmel_xdmac_runtime_suspend(struct device *dev)
2245
{
2246
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2247
2248
clk_disable(atxdmac->clk);
2249
2250
return 0;
2251
}
2252
2253
static int __maybe_unused atmel_xdmac_runtime_resume(struct device *dev)
2254
{
2255
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2256
2257
return clk_enable(atxdmac->clk);
2258
}
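/*
 * Runtime PM only gates the peripheral clock with clk_disable() /
 * clk_enable() above; the clock stays prepared. The heavier
 * clk_disable_unprepare() is left to system suspend.
 */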
2259
2260
static int at_xdmac_probe(struct platform_device *pdev)
2261
{
2262
struct at_xdmac *atxdmac;
2263
int irq, nr_channels, i, ret;
2264
void __iomem *base;
2265
u32 reg;
2266
2267
irq = platform_get_irq(pdev, 0);
2268
if (irq < 0)
2269
return irq;
2270
2271
base = devm_platform_ioremap_resource(pdev, 0);
2272
if (IS_ERR(base))
2273
return PTR_ERR(base);
2274
2275
/*
2276
* Read the number of xdmac channels; the read helper function can't be
2277
* used since atxdmac is not yet allocated and we need to know the
2278
* number of channels to do the allocation.
2279
*/
2280
reg = readl_relaxed(base + AT_XDMAC_GTYPE);
2281
nr_channels = AT_XDMAC_NB_CH(reg);
2282
if (nr_channels > AT_XDMAC_MAX_CHAN) {
2283
dev_err(&pdev->dev, "invalid number of channels (%u)\n",
2284
nr_channels);
2285
return -EINVAL;
2286
}
2287
2288
atxdmac = devm_kzalloc(&pdev->dev,
2289
struct_size(atxdmac, chan, nr_channels),
2290
GFP_KERNEL);
2291
if (!atxdmac) {
2292
dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
2293
return -ENOMEM;
2294
}
2295
2296
atxdmac->regs = base;
2297
atxdmac->irq = irq;
2298
atxdmac->dev = &pdev->dev;
2299
2300
atxdmac->layout = of_device_get_match_data(&pdev->dev);
2301
if (!atxdmac->layout)
2302
return -ENODEV;
2303
2304
atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
2305
if (IS_ERR(atxdmac->clk)) {
2306
dev_err(&pdev->dev, "can't get dma_clk\n");
2307
return PTR_ERR(atxdmac->clk);
2308
}
2309
2310
/* Do not use devres to prevent races with the tasklet */
2311
ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
2312
if (ret) {
2313
dev_err(&pdev->dev, "can't request irq\n");
2314
return ret;
2315
}
2316
2317
ret = clk_prepare_enable(atxdmac->clk);
2318
if (ret) {
2319
dev_err(&pdev->dev, "can't prepare or enable clock\n");
2320
goto err_free_irq;
2321
}
2322
2323
atxdmac->at_xdmac_desc_pool =
2324
dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
2325
sizeof(struct at_xdmac_desc), 4, 0);
2326
if (!atxdmac->at_xdmac_desc_pool) {
2327
dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
2328
ret = -ENOMEM;
2329
goto err_clk_disable;
2330
}
2331
2332
dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
2333
dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
2334
dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
2335
dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
2336
dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
2337
dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
2338
/*
2339
* Without DMA_PRIVATE the driver is not able to allocate more than
2340
* one channel; the second allocation fails in private_candidate.
2341
*/
2342
dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
2343
atxdmac->dma.dev = &pdev->dev;
2344
atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
2345
atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
2346
atxdmac->dma.device_tx_status = at_xdmac_tx_status;
2347
atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
2348
atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
2349
atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
2350
atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
2351
atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
2352
atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
2353
atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
2354
atxdmac->dma.device_config = at_xdmac_device_config;
2355
atxdmac->dma.device_pause = at_xdmac_device_pause;
2356
atxdmac->dma.device_resume = at_xdmac_device_resume;
2357
atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
2358
atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2359
atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2360
atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2361
atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2362
2363
platform_set_drvdata(pdev, atxdmac);
2364
2365
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2366
pm_runtime_use_autosuspend(&pdev->dev);
2367
pm_runtime_set_active(&pdev->dev);
2368
pm_runtime_enable(&pdev->dev);
2369
pm_runtime_get_noresume(&pdev->dev);
2370
2371
/* Init channels. */
2372
INIT_LIST_HEAD(&atxdmac->dma.channels);
2373
2374
/* Disable all chans and interrupts. */
2375
at_xdmac_off(atxdmac, true);
2376
2377
for (i = 0; i < nr_channels; i++) {
2378
struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2379
2380
atchan->chan.device = &atxdmac->dma;
2381
list_add_tail(&atchan->chan.device_node,
2382
&atxdmac->dma.channels);
2383
2384
atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
2385
atchan->mask = 1 << i;
2386
2387
spin_lock_init(&atchan->lock);
2388
INIT_LIST_HEAD(&atchan->xfers_list);
2389
INIT_LIST_HEAD(&atchan->free_descs_list);
2390
tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
2391
2392
/* Clear pending interrupts. */
2393
while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2394
cpu_relax();
2395
}
2396
2397
ret = dma_async_device_register(&atxdmac->dma);
2398
if (ret) {
2399
dev_err(&pdev->dev, "fail to register DMA engine device\n");
2400
goto err_pm_disable;
2401
}
2402
2403
ret = of_dma_controller_register(pdev->dev.of_node,
2404
at_xdmac_xlate, atxdmac);
2405
if (ret) {
2406
dev_err(&pdev->dev, "could not register of dma controller\n");
2407
goto err_dma_unregister;
2408
}
2409
2410
dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
2411
nr_channels, atxdmac->regs);
2412
2413
at_xdmac_axi_config(pdev);
2414
2415
pm_runtime_mark_last_busy(&pdev->dev);
2416
pm_runtime_put_autosuspend(&pdev->dev);
2417
2418
return 0;
2419
2420
err_dma_unregister:
2421
dma_async_device_unregister(&atxdmac->dma);
2422
err_pm_disable:
2423
pm_runtime_put_noidle(&pdev->dev);
2424
pm_runtime_disable(&pdev->dev);
2425
pm_runtime_set_suspended(&pdev->dev);
2426
pm_runtime_dont_use_autosuspend(&pdev->dev);
2427
err_clk_disable:
2428
clk_disable_unprepare(atxdmac->clk);
2429
err_free_irq:
2430
free_irq(atxdmac->irq, atxdmac);
2431
return ret;
2432
}
2433
2434
static void at_xdmac_remove(struct platform_device *pdev)
2435
{
2436
struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2437
int i;
2438
2439
at_xdmac_off(atxdmac, true);
2440
of_dma_controller_free(pdev->dev.of_node);
2441
dma_async_device_unregister(&atxdmac->dma);
2442
pm_runtime_disable(atxdmac->dev);
2443
pm_runtime_set_suspended(&pdev->dev);
2444
pm_runtime_dont_use_autosuspend(&pdev->dev);
2445
clk_disable_unprepare(atxdmac->clk);
2446
2447
free_irq(atxdmac->irq, atxdmac);
2448
2449
for (i = 0; i < atxdmac->dma.chancnt; i++) {
2450
struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2451
2452
tasklet_kill(&atchan->tasklet);
2453
at_xdmac_free_chan_resources(&atchan->chan);
2454
}
2455
}
2456
2457
static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
2458
.prepare = atmel_xdmac_prepare,
2459
SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
2460
SET_RUNTIME_PM_OPS(atmel_xdmac_runtime_suspend,
2461
atmel_xdmac_runtime_resume, NULL)
2462
};
2463
2464
static const struct of_device_id atmel_xdmac_dt_ids[] = {
2465
{
2466
.compatible = "atmel,sama5d4-dma",
2467
.data = &at_xdmac_sama5d4_layout,
2468
}, {
2469
.compatible = "microchip,sama7g5-dma",
2470
.data = &at_xdmac_sama7g5_layout,
2471
}, {
2472
/* sentinel */
2473
}
2474
};
2475
MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
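/*
 * Device tree usage sketch (node names, address and request line are
 * examples only):
 *
 *	dma0: dma-controller@f0010000 {
 *		compatible = "atmel,sama5d4-dma";
 *		#dma-cells = <1>;
 *		...
 *	};
 *
 *	client {
 *		dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) |
 *			       AT91_XDMAC_DT_PER_IF(1) |
 *			       AT91_XDMAC_DT_PERID(11))>;
 *		dma-names = "tx";
 *	};
 */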
2476
2477
static struct platform_driver at_xdmac_driver = {
2478
.probe = at_xdmac_probe,
2479
.remove = at_xdmac_remove,
2480
.driver = {
2481
.name = "at_xdmac",
2482
.of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
2483
.pm = pm_ptr(&atmel_xdmac_dev_pm_ops),
2484
}
2485
};
2486
2487
static int __init at_xdmac_init(void)
2488
{
2489
return platform_driver_register(&at_xdmac_driver);
2490
}
2491
subsys_initcall(at_xdmac_init);
2492
2493
static void __exit at_xdmac_exit(void)
2494
{
2495
platform_driver_unregister(&at_xdmac_driver);
2496
}
2497
module_exit(at_xdmac_exit);
2498
2499
MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
2500
MODULE_AUTHOR("Ludovic Desroches <[email protected]>");
2501
MODULE_LICENSE("GPL");
2502
2503