/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION "5.00"

#define IOAT_DMA_DCA_ANY_CPU ~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

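/* per-channel register blocks are 0x80 bytes apart within the device MMIO space */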
#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: read DMA capabilities register
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct dma_pool *completion_pool;
#define MAX_SED_POOLS 5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
#define IOAT_MAX_CHANS 4
	struct msix_entry msix_entries[IOAT_MAX_CHANS];
	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;
	int chancnt;

	/* shadow version for CB3.3 chan reset errata workaround */
	u64 msixtba0;
	u64 msixdata0;
	u32 msixpba;
};

#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
#define IOAT_CHUNK_SIZE (SZ_512K)
#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)

struct ioat_descs {
	void *virt;
	dma_addr_t hw;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_CHAN_DOWN 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

	/* ioat v2 / v3 channel attributes
	 * @xfercap_log: log2 of channel max transfer length (for fast division)
	 * @head: allocated index
	 * @issued: hardware notification point
	 * @tail: cleanup index
	 * @dmacount: identical to 'head' except for occasionally resetting to zero
	 * @alloc_order: log2 of the number of allocated descriptors
	 * @produce: number of descriptors to produce at submit time
	 * @ring: software ring buffer implementation of hardware ring
	 * @prep_lock: serializes descriptor preparation (producers)
	 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
	struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
	int desc_chunks;
	int intr_coalesce;
	int prev_intr_coalesce;
};

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
	ssize_t (*store)(struct dma_chan *, const char *, size_t);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: pointer to the dma descriptor that's the parent
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */

struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
	struct ioat_sed_ent *sed;
};

extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

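/* trace a descriptor's hardware fields and cookie via dev_dbg() */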
static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}

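/* extract the completed-descriptor address field from a CHANSTS value */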
static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

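/*
 * channel command helpers: the CHANCMD register offset varies with the
 * device version
 */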
static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

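/* decode the status field of a CHANSTS value */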
static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

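/* number of descriptors in the ring (always a power of two) */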
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

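/* count of ring slots still available for new descriptors */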
static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}

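/* descriptors needed for a transfer of @len bytes, rounding up any remainder */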
static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}

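/* the ring size is a power of two, so masking wraps the index into range */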
static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

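/* program the 64-bit descriptor chain address as two 32-bit register writes */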
static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);

/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(struct tasklet_struct *t);
void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */