GitHub repository torvalds/linux, file drivers/dma/idxd/dma.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	struct idxd_dma_chan *idxd_chan;

	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
	return idxd_chan->wq;
}
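
/*
 * Finish the dmaengine side of a completed (or aborted) descriptor:
 * translate the DSA completion status into a dmaengine_result, complete
 * the cookie, invoke the client callback, and optionally free the
 * descriptor. When the device hands out interrupt handles, a descriptor
 * that failed with an invalid handle is resubmitted instead of completed.
 */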
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type,
			   bool free_desc, void *ctx, u32 *status)
{
	struct idxd_device *idxd = desc->wq->idxd;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_result res;
	int complete = 1;

	if (desc->completion->status == DSA_COMP_SUCCESS) {
		res.result = DMA_TRANS_NOERROR;
	} else if (desc->completion->status) {
		if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT &&
		    desc->completion->status == DSA_COMP_INT_HANDLE_INVAL &&
		    idxd_queue_int_handle_resubmit(desc))
			return;
		res.result = DMA_TRANS_WRITE_FAILED;
	} else if (comp_type == IDXD_COMPLETE_ABORT) {
		res.result = DMA_TRANS_ABORTED;
	} else {
		complete = 0;
	}

	tx = &desc->txd;
	if (complete && tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	if (free_desc)
		idxd_free_desc(desc->wq, desc);
}
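
/*
 * Build the hardware descriptor flags: always request a completion record
 * (CRAV | RCR); request a completion interrupt (RCI) only when the client
 * asked for one via DMA_PREP_INTERRUPT.
 */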
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	if (flags & DMA_PREP_INTERRUPT)
		*desc_flags |= IDXD_OP_FLAG_RCI;
}
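
/* Fill the fields common to every hardware descriptor this driver issues. */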
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
					 struct dsa_hw_desc *hw, char opcode,
					 u64 addr_f1, u64 addr_f2, u64 len,
					 u64 compl, u32 flags)
{
	hw->flags = flags;
	hw->opcode = opcode;
	hw->src_addr = addr_f1;
	hw->dst_addr = addr_f2;
	hw->xfer_size = len;
	/*
	 * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
	 * field instead. This field should be set to 0 for kernel descriptors
	 * since kernel DMA on VT-d supports "user" privilege only.
	 */
	hw->priv = 0;
	hw->completion_addr = compl;
}
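
/*
 * An interrupt-only transaction is expressed as a NOOP descriptor with no
 * source, destination, or length; its only observable effect is the
 * completion record and, when requested, the completion interrupt.
 */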
static struct dma_async_tx_descriptor *
idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
			      0, 0, 0, desc->compl_dma, desc_flags);
	desc->txd.flags = flags;
	return &desc->txd;
}
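
/*
 * A memcpy transaction maps to a single DSA MEMMOVE descriptor. Transfers
 * longer than the device's max_xfer_bytes are rejected rather than split.
 */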
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	if (len > idxd->max_xfer_bytes)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
			      dma_src, dma_dest, len, desc->compl_dma,
			      desc_flags);

	desc->txd.flags = flags;

	return &desc->txd;
}
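
/*
 * A DMA channel corresponds 1:1 to an idxd workqueue, so allocating and
 * freeing channel resources reduces to taking and dropping a reference on
 * the wq.
 */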
static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_get(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
	return 0;
}

static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_put(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
}
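
/*
 * The device completes descriptors out of order, so per-cookie progress
 * cannot be reported; clients must rely on completion callbacks instead.
 * This pairs with the DMA_COMPLETION_NO_ORDER capability set at device
 * registration.
 */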
static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}
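
/*
 * Submission is immediate: assign a cookie and hand the descriptor to the
 * wq. On failure the descriptor is freed and the negative error code is
 * returned in place of a cookie.
 */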
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct idxd_wq *wq = to_idxd_wq(c);
	dma_cookie_t cookie;
	int rc;
	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

	cookie = dma_cookie_assign(tx);

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0) {
		idxd_free_desc(wq, desc);
		return rc;
	}

	return cookie;
}

static void idxd_dma_release(struct dma_device *device)
{
	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

	kfree(idxd_dma);
}
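
/*
 * Register the dmaengine device for an idxd device. DMA_MEMCPY capability
 * is advertised only when the hardware operation capability register
 * reports MEMMOVE support.
 */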
int idxd_register_dma_device(struct idxd_device *idxd)
{
	struct idxd_dma_dev *idxd_dma;
	struct dma_device *dma;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_dma)
		return -ENOMEM;

	dma = &idxd_dma->dma;
	INIT_LIST_HEAD(&dma->channels);
	dma->dev = dev;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;

	dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
	}

	dma->device_tx_status = idxd_dma_tx_status;
	dma->device_issue_pending = idxd_dma_issue_pending;
	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
	dma->device_free_chan_resources = idxd_dma_free_chan_resources;

	rc = dma_async_device_register(dma);
	if (rc < 0) {
		kfree(idxd_dma);
		return rc;
	}

	idxd_dma->idxd = idxd;
	/*
	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
	 * as long as there are outstanding channels.
	 */
	idxd->idxd_dma = idxd_dma;
	return 0;
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
	dma_async_device_unregister(&idxd->idxd_dma->dma);
}
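
/*
 * Expose a wq as a DMA channel: initialize the tx descriptor of every
 * preallocated wq descriptor against the new channel, register the channel
 * with the dmaengine core, and pin the wq's conf device for the lifetime
 * of the channel.
 */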
static int idxd_register_dma_channel(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct dma_device *dma = &idxd->idxd_dma->dma;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_dma_chan *idxd_chan;
	struct dma_chan *chan;
	int rc, i;

	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_chan)
		return -ENOMEM;

	chan = &idxd_chan->chan;
	chan->device = dma;
	list_add_tail(&chan->device_node, &dma->channels);

	for (i = 0; i < wq->num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	rc = dma_async_device_channel_register(dma, chan, NULL);
	if (rc < 0) {
		kfree(idxd_chan);
		return rc;
	}

	wq->idxd_chan = idxd_chan;
	idxd_chan->wq = wq;
	get_device(wq_confdev(wq));

	return 0;
}

static void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
	struct dma_chan *chan = &idxd_chan->chan;
	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
	list_del(&chan->device_node);
	kfree(wq->idxd_chan);
	wq->idxd_chan = NULL;
	put_device(wq_confdev(wq));
}
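
/*
 * Probe callback for a wq bound to the "dmaengine" sub-driver: mark the wq
 * as kernel-owned, enable it, and register it as a DMA channel. Any
 * failure rolls the wq back to an unbound, disabled state.
 */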
static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	mutex_lock(&wq->wq_lock);
	if (!idxd_wq_driver_name_match(wq, dev)) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
		rc = -ENODEV;
		goto err;
	}

	wq->type = IDXD_WQT_KERNEL;

	rc = idxd_drv_enable_wq(wq);
	if (rc < 0) {
		dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
		rc = -ENXIO;
		goto err;
	}

	rc = idxd_register_dma_channel(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
		dev_dbg(dev, "Failed to register dma channel\n");
		goto err_dma;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

err_dma:
	idxd_drv_disable_wq(wq);
err:
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
	return rc;
}

static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	idxd_unregister_dma_channel(wq);
	idxd_drv_disable_wq(wq);
	mutex_unlock(&wq->wq_lock);
}
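
/*
 * Device types this sub-driver binds to; the list is terminated by
 * IDXD_DEV_NONE.
 */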
static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};
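
/* Sub-driver that exposes idxd workqueues to the kernel dmaengine framework. */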
struct idxd_device_driver idxd_dmaengine_drv = {
	.probe = idxd_dmaengine_drv_probe,
	.remove = idxd_dmaengine_drv_remove,
	.desc_complete = idxd_dma_complete_txd,
	.name = "dmaengine",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);