Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma/dw-edma/dw-hdma-v0-core.c
26282 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Copyright (c) 2023 Cai Huoqing
4
* Synopsys DesignWare HDMA v0 core
5
*/
6
7
#include <linux/bitfield.h>
8
#include <linux/irqreturn.h>
9
#include <linux/io-64-nonatomic-lo-hi.h>
10
11
#include "dw-edma-core.h"
12
#include "dw-hdma-v0-core.h"
13
#include "dw-hdma-v0-regs.h"
14
#include "dw-hdma-v0-debugfs.h"
15
16
/*
 * Control bits written into the linked-list element "control" word
 * (consumed by dw_hdma_v0_write_ll_data()/dw_hdma_v0_write_ll_link()
 * and dw_hdma_v0_core_write_chunk() below).
 */
enum dw_hdma_control {
	DW_HDMA_V0_CB		= BIT(0),	/* cycle bit */
	DW_HDMA_V0_TCB		= BIT(1),	/* toggle cycle bit (set on the link element) */
	DW_HDMA_V0_LLP		= BIT(2),	/* load link pointer (marks link element) */
	DW_HDMA_V0_LWIE		= BIT(3),	/* presumably local watermark IRQ enable — unused here */
	DW_HDMA_V0_RWIE		= BIT(4),	/* presumably remote watermark IRQ enable — unused here */
	DW_HDMA_V0_CCS		= BIT(8),	/* consumer cycle state — unused here */
	DW_HDMA_V0_LLE		= BIT(9),	/* linked list enable — unused here */
};
25
26
/* Base of the memory-mapped HDMA v0 register space provided by the chip glue. */
static inline struct dw_hdma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
	return dw->chip->reg_base;
}
30
31
/*
 * Per-channel register block for channel @ch in direction @dir.
 * Each channel slot has distinct write- and read-direction register sets.
 */
static inline struct dw_hdma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
	struct dw_hdma_v0_regs __iomem *regs = __dw_regs(dw);

	return dir == EDMA_DIR_WRITE ? &regs->ch[ch].wr : &regs->ch[ch].rd;
}
39
40
/* Write a 32-bit per-channel register @name for direction @dir, channel @ch. */
#define SET_CH_32(dw, dir, ch, name, value) \
	writel(value, &(__dw_ch_regs(dw, dir, ch)->name))

/* Read a 32-bit per-channel register @name for direction @dir, channel @ch. */
#define GET_CH_32(dw, dir, ch, name) \
	readl(&(__dw_ch_regs(dw, dir, ch)->name))

/* Write @value to register @name of channel @ch in BOTH directions. */
#define SET_BOTH_CH_32(dw, ch, name, value) \
	do { \
		writel(value, &(__dw_ch_regs(dw, EDMA_DIR_WRITE, ch)->name)); \
		writel(value, &(__dw_ch_regs(dw, EDMA_DIR_READ, ch)->name)); \
	} while (0)
51
52
/* HDMA management callbacks */
53
static void dw_hdma_v0_core_off(struct dw_edma *dw)
54
{
55
int id;
56
57
for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
58
SET_BOTH_CH_32(dw, id, int_setup,
59
HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
60
SET_BOTH_CH_32(dw, id, int_clear,
61
HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
62
SET_BOTH_CH_32(dw, id, ch_en, 0);
63
}
64
}
65
66
/*
 * Report the number of hardware channels for @dir.
 * Both parameters are intentionally unused: see comment below.
 */
static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
	/*
	 * The HDMA IP have no way to know the number of hardware channels
	 * available, we set it to maximum channels and let the platform
	 * set the right number of channels.
	 */
	return HDMA_V0_MAX_NR_CH;
}
75
76
static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
77
{
78
struct dw_edma *dw = chan->dw;
79
u32 tmp;
80
81
tmp = FIELD_GET(HDMA_V0_CH_STATUS_MASK,
82
GET_CH_32(dw, chan->id, chan->dir, ch_stat));
83
84
if (tmp == 1)
85
return DMA_IN_PROGRESS;
86
else if (tmp == 3)
87
return DMA_COMPLETE;
88
else
89
return DMA_ERROR;
90
}
91
92
/* Acknowledge (clear) the stop/done interrupt for @chan. */
static void dw_hdma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_STOP_INT_MASK);
}
98
99
/* Acknowledge (clear) the abort interrupt for @chan. */
static void dw_hdma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_ABORT_INT_MASK);
}
105
106
/* Raw interrupt status register for @chan (stop/abort bits tested by caller). */
static u32 dw_hdma_v0_core_status_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	return GET_CH_32(dw, chan->dir, chan->id, int_stat);
}
112
113
static irqreturn_t
114
dw_hdma_v0_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
115
dw_edma_handler_t done, dw_edma_handler_t abort)
116
{
117
struct dw_edma *dw = dw_irq->dw;
118
unsigned long total, pos, val;
119
irqreturn_t ret = IRQ_NONE;
120
struct dw_edma_chan *chan;
121
unsigned long off, mask;
122
123
if (dir == EDMA_DIR_WRITE) {
124
total = dw->wr_ch_cnt;
125
off = 0;
126
mask = dw_irq->wr_mask;
127
} else {
128
total = dw->rd_ch_cnt;
129
off = dw->wr_ch_cnt;
130
mask = dw_irq->rd_mask;
131
}
132
133
for_each_set_bit(pos, &mask, total) {
134
chan = &dw->chan[pos + off];
135
136
val = dw_hdma_v0_core_status_int(chan);
137
if (FIELD_GET(HDMA_V0_STOP_INT_MASK, val)) {
138
dw_hdma_v0_core_clear_done_int(chan);
139
done(chan);
140
141
ret = IRQ_HANDLED;
142
}
143
144
if (FIELD_GET(HDMA_V0_ABORT_INT_MASK, val)) {
145
dw_hdma_v0_core_clear_abort_int(chan);
146
abort(chan);
147
148
ret = IRQ_HANDLED;
149
}
150
}
151
152
return ret;
153
}
154
155
/*
 * Fill linked-list data element @i of @chunk with one burst descriptor.
 * Local LL memory is plain RAM; remote LL memory must go through MMIO
 * accessors.
 */
static void dw_hdma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
				     u32 control, u32 size, u64 sar, u64 dar)
{
	ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli);
	bool remote = !(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL);

	if (remote) {
		struct dw_hdma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;

		writel(control, &lli->control);
		writel(size, &lli->transfer_size);
		writeq(sar, &lli->sar.reg);
		writeq(dar, &lli->dar.reg);
	} else {
		struct dw_hdma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;

		lli->control = control;
		lli->transfer_size = size;
		lli->sar.reg = sar;
		lli->dar.reg = dar;
	}
}
176
177
/*
 * Fill linked-list element @i of @chunk as a link element pointing at
 * @pointer. Same local/remote access split as dw_hdma_v0_write_ll_data().
 */
static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk,
				     int i, u32 control, u64 pointer)
{
	ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli);
	bool remote = !(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL);

	if (remote) {
		struct dw_hdma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;

		writel(control, &llp->control);
		writeq(pointer, &llp->llp.reg);
	} else {
		struct dw_hdma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;

		llp->control = control;
		llp->llp.reg = pointer;
	}
}
194
195
static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
196
{
197
struct dw_edma_burst *child;
198
u32 control = 0, i = 0;
199
200
if (chunk->cb)
201
control = DW_HDMA_V0_CB;
202
203
list_for_each_entry(child, &chunk->burst->list, list)
204
dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz,
205
child->sar, child->dar);
206
207
control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB;
208
if (!chunk->cb)
209
control |= DW_HDMA_V0_CB;
210
211
dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
212
}
213
214
/*
 * Make sure the just-written LL data is visible to the remote engine
 * before the doorbell is rung. No-op for a local (DW_EDMA_CHIP_LOCAL)
 * setup, where the LL region is ordinary memory.
 */
static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
{
	/*
	 * In case of remote HDMA engine setup, the DW PCIe RP/EP internal
	 * configuration registers and application memory are normally accessed
	 * over different buses. Ensure LL-data reaches the memory before the
	 * doorbell register is toggled by issuing the dummy-read from the remote
	 * LL memory in a hope that the MRd TLP will return only after the
	 * last MWr TLP is completed
	 */
	if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
		readl(chunk->ll_region.vaddr.io);
}
227
228
/*
 * Program the channel with @chunk's linked list and kick off the transfer.
 * When @first is set, the channel is fully (re)initialized — engine enable,
 * interrupt setup and LL base address — before ringing the doorbell.
 * NOTE(review): the register write order below appears deliberate
 * (enable -> IRQ setup -> control -> LLP -> cycle_sync -> doorbell);
 * do not reorder without consulting the HDMA databook.
 */
static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma *dw = chan->dw;
	u32 tmp;

	dw_hdma_v0_core_write_chunk(chunk);

	if (first) {
		/* Enable engine */
		SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
		/* Interrupt unmask - stop, abort */
		tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup);
		tmp &= ~(HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
		/* Interrupt enable - stop, abort */
		tmp |= HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
		/* Remote setups additionally need the remote IRQ enables */
		if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
			tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
		SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
		/* Channel control */
		SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
		/* Linked list */
		/* llp is not aligned on 64bit -> keep 32bit accesses */
		SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
			  lower_32_bits(chunk->ll_region.paddr));
		SET_CH_32(dw, chan->dir, chan->id, llp.msb,
			  upper_32_bits(chunk->ll_region.paddr));
	}
	/* Set consumer cycle */
	SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
		  HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);

	/* Flush LL writes to remote memory before starting (see helper) */
	dw_hdma_v0_sync_ll_data(chunk);

	/* Doorbell */
	SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
}
265
266
static void dw_hdma_v0_core_ch_config(struct dw_edma_chan *chan)
267
{
268
struct dw_edma *dw = chan->dw;
269
270
/* MSI done addr - low, high */
271
SET_CH_32(dw, chan->dir, chan->id, msi_stop.lsb, chan->msi.address_lo);
272
SET_CH_32(dw, chan->dir, chan->id, msi_stop.msb, chan->msi.address_hi);
273
/* MSI abort addr - low, high */
274
SET_CH_32(dw, chan->dir, chan->id, msi_abort.lsb, chan->msi.address_lo);
275
SET_CH_32(dw, chan->dir, chan->id, msi_abort.msb, chan->msi.address_hi);
276
/* config MSI data */
277
SET_CH_32(dw, chan->dir, chan->id, msi_msgdata, chan->msi.data);
278
}
279
280
/* HDMA debugfs callbacks */
281
/* Thin ops-table adapter to the HDMA v0 debugfs setup routine. */
static void dw_hdma_v0_core_debugfs_on(struct dw_edma *dw)
{
	dw_hdma_v0_debugfs_on(dw);
}
285
286
/* HDMA v0 implementation of the common dw-edma core operations. */
static const struct dw_edma_core_ops dw_hdma_v0_core = {
	.off = dw_hdma_v0_core_off,
	.ch_count = dw_hdma_v0_core_ch_count,
	.ch_status = dw_hdma_v0_core_ch_status,
	.handle_int = dw_hdma_v0_core_handle_int,
	.start = dw_hdma_v0_core_start,
	.ch_config = dw_hdma_v0_core_ch_config,
	.debugfs_on = dw_hdma_v0_core_debugfs_on,
};
295
296
/* Hook the HDMA v0 ops table into the generic dw-edma device instance. */
void dw_hdma_v0_core_register(struct dw_edma *dw)
{
	dw->core = &dw_hdma_v0_core;
}
300
301