GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/dma/dmaengine.c
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a hardware-neutral
 * interface for other kernel code to use asynchronous memory copy
 * capabilities, if present, and allows different hardware DMA drivers to
 * register as providing this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel(). Once a channel is allocated, a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
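
/*
 * Illustrative usage sketch (not part of the original file): a client of the
 * public channel pool might combine the calls described above roughly as
 * follows. The variables chan, dest, src, len and cookie are hypothetical,
 * and error handling is omitted.
 *
 *      dmaengine_get();
 *      chan = dma_find_channel(DMA_MEMCPY);
 *      if (chan)
 *              cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *      dma_issue_pending_all();
 *      dmaengine_put();
 *
 * dma_find_channel() may return NULL; see Documentation/dmaengine.txt for the
 * full client rules.
 */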

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
static struct idr dma_idr;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan)
                err = sprintf(buf, "%d\n", chan->client_count);
        else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, chan_dev->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
}

static struct class dma_devclass = {
        .name = "dma",
        .dev_attrs = dma_attrs,
        .dev_release = chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                   DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);

        if (chan->client_count) {
                __module_get(owner);
                err = 0;
        } else if (try_module_get(owner))
                err = 0;

        if (err == 0)
                chan->client_count++;

        /* allocate upon first client reference */
        if (chan->client_count == 1 && err == 0) {
                int desc_cnt = chan->device->device_alloc_chan_resources(chan);

                if (desc_cnt < 0) {
                        err = desc_cnt;
                        chan->client_count = 0;
                        module_put(owner);
                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                        balance_ref_count(chan);
        }

        return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        if (!chan->client_count)
                return; /* this channel failed alloc_chan_resources */
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
        if (chan->client_count == 0)
                chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("dmaengine: initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        if (channel_table[cap])
                                free_percpu(channel_table[cap]);
        }

        return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied. Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min)
                                min = chan;
                        else if (chan->table_count < min->table_count)
                                min = chan;

                        if (n-- == 0) {
                                ret = chan;
                                break; /* done */
                        }
                }
                if (ret)
                        break; /* done */
        }

        if (!ret)
                ret = min;

        if (ret)
                ret->table_count++;

        return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;
        int n;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        if (num_possible_cpus() > 1)
                                chan = nth_chan(cap, n++);
                        else
                                chan = nth_chan(cap, -1);

                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
                        pr_debug("%s: %s filter said false\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                return chan;
        }

        return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        int err;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device, fn, fn_param);
                if (chan) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it. We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        device->privatecnt++;
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n", __func__,
                                         dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dma_chan_name(chan), err);
                        else
                                break;
                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
                        chan = NULL;
                }
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
                 chan ? dma_chan_name(chan) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
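
/*
 * Illustrative usage sketch (not part of the original file): exclusive
 * channels are normally obtained through the dma_request_channel() wrapper
 * declared in linux/dmaengine.h; my_filter and my_dev below are hypothetical.
 *
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, my_filter, my_dev);
 *      if (chan) {
 *              ... prepare and submit descriptors on chan ...
 *              dma_release_channel(chan);
 *      }
 */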

void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        /* drop PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dma_chan_name(chan), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
        /* A device that satisfies this test has channels that will never cause
         * an async_tx channel switch event as all possible operation types can
         * be handled.
         */
#ifdef CONFIG_ASYNC_TX_DMA
        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                return false;
#endif

#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
                return false;
#endif

#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
        if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
                return false;
#endif

#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
        if (!dma_has_cap(DMA_XOR, device->cap_mask))
                return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
                return false;
#endif
#endif

#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
        if (!dma_has_cap(DMA_PQ, device->cap_mask))
                return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
                return false;
#endif
#endif

        return true;
}

static int get_dma_id(struct dma_device *device)
{
        int rc;

idr_retry:
        if (!idr_pre_get(&dma_idr, GFP_KERNEL))
                return -ENOMEM;
        mutex_lock(&dma_list_mutex);
        rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
        mutex_unlock(&dma_list_mutex);
        if (rc == -EAGAIN)
                goto idr_retry;
        else if (rc != 0)
                return rc;

        return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        int chancnt = 0, rc;
        struct dma_chan* chan;
        atomic_t *idr_ref;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
               !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
               !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
               !device->device_prep_dma_xor_val);
        BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
               !device->device_prep_dma_pq);
        BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
               !device->device_prep_dma_pq_val);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
               !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
               !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
               !device->device_prep_dma_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
               !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
               !device->device_prep_dma_cyclic);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
               !device->device_control);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_tx_status);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        /* note: this only matters in the
         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
        if (!idr_ref)
                return -ENOMEM;
        rc = get_dma_id(device);
        if (rc != 0) {
                kfree(idr_ref);
                return rc;
        }

        atomic_set(idr_ref, 0);

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                rc = -ENOMEM;
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        goto err_out;
                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
                if (chan->dev == NULL) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                chan->chan_id = chancnt++;
                chan->dev->device.class = &dma_devclass;
                chan->dev->device.parent = device->dev;
                chan->dev->chan = chan;
                chan->dev->idr_ref = idr_ref;
                chan->dev->dev_id = device->dev_id;
                atomic_inc(idr_ref);
                dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        kfree(chan->dev);
                        atomic_dec(idr_ref);
                        goto err_out;
                }
                chan->client_count = 0;
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                device->privatecnt++; /* Always private */
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        /* if we never registered a channel just release the idr */
        if (atomic_read(idr_ref) == 0) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, device->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(idr_ref);
                return rc;
        }

        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                            void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK |
                DMA_COMPL_SRC_UNMAP_SINGLE |
                DMA_COMPL_DEST_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
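
/*
 * Illustrative usage sketch (not part of the original file): after submitting
 * a copy the caller still has to kick the engine and wait for completion.
 * chan, dest, src and len are hypothetical; a negative cookie indicates the
 * submission failed.
 *
 *      cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *      if (dma_submit_error(cookie))
 *              return -ENOMEM;
 *      dma_async_issue_pending(chan);
 *      if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *              return -EIO;
 */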

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                           unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
                          unsigned int dest_off, struct page *src_pg, unsigned int src_off,
                          size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                  struct dma_chan *chan)
{
        tx->chan = chan;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        spin_lock_init(&tx->lock);
#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        if (!tx)
                return DMA_SUCCESS;

        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        pr_err("%s timeout waiting for descriptor submission\n",
                               __func__);
                        return DMA_ERROR;
                }
                cpu_relax();
        }
        return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 * (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = txd_next(tx);
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        /* we'll submit tx->next now, so clear the link */
        txd_clear_next(tx);
        chan = dep->chan;

        /* keep submitting up until a channel switch is detected; in that case
         * we will be called again as a result of processing the interrupt
         * from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                txd_lock(dep);
                txd_clear_parent(dep);
                dep_next = txd_next(dep);
                if (dep_next && dep_next->chan == chan)
                        txd_clear_next(dep); /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                txd_unlock(dep);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
        idr_init(&dma_idr);
        mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);