// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage a container of
 * dma_fence objects associated with a resource. A reservation object
 * can have any number of fences attached to it. Each fence carries a usage
 * parameter determining how the operation represented by the fence uses the
 * resource. The RCU mechanism is used to protect read access to fences from
 * locked write-side updates.
 *
 * See struct dma_resv for more details.
 */
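
/*
 * Illustrative sketch (not part of the upstream file): the typical
 * single-object update pattern built from the API implemented below. The
 * helper name example_attach_fence is hypothetical; the dma_resv_* calls
 * are the real ones.
 */
#if 0
static int example_attach_fence(struct dma_resv *obj, struct dma_fence *fence)
{
        int ret;

        /* A NULL ww_acquire_ctx is fine when locking a single object. */
        ret = dma_resv_lock(obj, NULL);
        if (ret)
                return ret;

        /* Make room first; this may allocate and can therefore fail. */
        ret = dma_resv_reserve_fences(obj, 1);
        if (!ret)
                dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);

        dma_resv_unlock(obj);
        return ret;
}
#endif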

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/* Mask for the lower fence pointer bits */
#define DMA_RESV_LIST_MASK	0x3

struct dma_resv_list {
        struct rcu_head rcu;
        u32 num_fences, max_fences;
        struct dma_fence __rcu *table[];
};

/* Extract the fence and usage flags from an RCU protected entry in the list. */
static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
                                struct dma_resv *resv, struct dma_fence **fence,
                                enum dma_resv_usage *usage)
{
        long tmp;

        tmp = (long)rcu_dereference_check(list->table[index],
                                          resv ? dma_resv_held(resv) : true);
        *fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
        if (usage)
                *usage = tmp & DMA_RESV_LIST_MASK;
}

/* Set the fence and usage flags at the specific index in the list. */
static void dma_resv_list_set(struct dma_resv_list *list,
                              unsigned int index,
                              struct dma_fence *fence,
                              enum dma_resv_usage usage)
{
        long tmp = ((long)fence) | usage;

        RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
}
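
/*
 * Aside, as a minimal sketch: the two helpers above work because dma_fence
 * allocations are aligned well enough that the two lowest pointer bits are
 * always zero, leaving room for the dma_resv_usage value. The variable
 * names below are hypothetical.
 */
#if 0
uintptr_t tagged = (uintptr_t)fence | usage;                      /* pack */
struct dma_fence *f = (void *)(tagged & ~(uintptr_t)DMA_RESV_LIST_MASK);
enum dma_resv_usage u = tagged & DMA_RESV_LIST_MASK;              /* unpack */
#endif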

/*
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * max_fences.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
        struct dma_resv_list *list;
        size_t size;

        /* Round up to the next kmalloc bucket size. */
        size = kmalloc_size_roundup(struct_size(list, table, max_fences));

        list = kmalloc(size, GFP_KERNEL);
        if (!list)
                return NULL;

        /* Given the resulting bucket size, recalculate max_fences. */
        list->max_fences = (size - offsetof(typeof(*list), table)) /
                sizeof(*list->table);

        return list;
}

/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
        unsigned int i;

        if (!list)
                return;

        for (i = 0; i < list->num_fences; ++i) {
                struct dma_fence *fence;

                dma_resv_list_entry(list, i, NULL, &fence, NULL);
                dma_fence_put(fence);
        }
        kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
        ww_mutex_init(&obj->lock, &reservation_ww_class);
        RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
        /*
         * This object should be dead and all references to it must have
         * been released, so there is no need for RCU protection.
         */
        dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
        ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
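
/*
 * Illustrative sketch, with a hypothetical example_buffer struct: a
 * dma_resv is normally embedded in a longer-lived object, and the
 * init/fini calls bracket that object's lifetime.
 */
#if 0
struct example_buffer {
        struct dma_resv resv;
        /* driver-specific members ... */
};

static void example_buffer_setup(struct example_buffer *buf)
{
        dma_resv_init(&buf->resv);
}

static void example_buffer_teardown(struct example_buffer *buf)
{
        /* Only legal once no other thread can still reach buf. */
        dma_resv_fini(&buf->resv);
}
#endif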

/* Dereference the fences while ensuring RCU rules */
static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
        return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}

/**
 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_fence(). Must be called with @obj
 * locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_fence(). This is validated when
 * CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
        struct dma_resv_list *old, *new;
        unsigned int i, j, k, max;

        dma_resv_assert_held(obj);

        /* Driver and component code should never call this function with
         * num_fences=0. If they do, it usually points to bugs when calculating
         * the number of needed fences dynamically.
         */
        if (WARN_ON(!num_fences))
                return -EINVAL;

        old = dma_resv_fences_list(obj);
        if (old && old->max_fences) {
                if ((old->num_fences + num_fences) <= old->max_fences)
                        return 0;
                max = max(old->num_fences + num_fences, old->max_fences * 2);
        } else {
                max = max(4ul, roundup_pow_of_two(num_fences));
        }

        new = dma_resv_list_alloc(max);
        if (!new)
                return -ENOMEM;

        /*
         * No need to bump fence refcounts, rcu_read access
         * requires the use of kref_get_unless_zero, and the
         * references from the old struct are carried over to
         * the new.
         */
        for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
                enum dma_resv_usage usage;
                struct dma_fence *fence;

                dma_resv_list_entry(old, i, obj, &fence, &usage);
                if (dma_fence_is_signaled(fence))
                        RCU_INIT_POINTER(new->table[--k], fence);
                else
                        dma_resv_list_set(new, j++, fence, usage);
        }
        new->num_fences = j;

        /*
         * We are not changing the effective set of fences here so can
         * merely update the pointer to the new array; both existing
         * readers and new readers will see exactly the same set of
         * active (unsignaled) fences. Individual fences and the
         * old array are protected by RCU and so will not vanish under
         * the gaze of the rcu_read_lock() readers.
         */
        rcu_assign_pointer(obj->fences, new);

        if (!old)
                return 0;

        /* Drop the references to the signaled fences */
        for (i = k; i < max; ++i) {
                struct dma_fence *fence;

                fence = rcu_dereference_protected(new->table[i],
                                                  dma_resv_held(obj));
                dma_fence_put(fence);
        }
        kfree_rcu(old, rcu);

        return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_fences);
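
/*
 * Illustrative sketch (hypothetical helper name): reserving all slots up
 * front amortizes the allocation over several dma_resv_add_fence() calls.
 * Remember that the reservation is lost if the object is unlocked in
 * between.
 */
#if 0
static int example_add_many(struct dma_resv *obj,
                            struct dma_fence **fences, unsigned int n)
{
        unsigned int i;
        int ret;

        dma_resv_assert_held(obj);

        ret = dma_resv_reserve_fences(obj, n);
        if (ret)
                return ret;

        for (i = 0; i < n; ++i)
                dma_resv_add_fence(obj, fences[i], DMA_RESV_USAGE_READ);

        return 0;
}
#endif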

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_max_fences - reset fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved fence slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.max_fences.
 */
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
        struct dma_resv_list *fences = dma_resv_fences_list(obj);

        dma_resv_assert_held(obj);

        /* Test fence slot reservation */
        if (fences)
                fences->max_fences = fences->num_fences;
}
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif

/**
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a slot. @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() must have been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
                        enum dma_resv_usage usage)
{
        struct dma_resv_list *fobj;
        struct dma_fence *old;
        unsigned int i, count;

        dma_fence_get(fence);

        dma_resv_assert_held(obj);

        /* Drivers should not add containers here, instead add each fence
         * individually.
         */
        WARN_ON(dma_fence_is_container(fence));

        fobj = dma_resv_fences_list(obj);
        count = fobj->num_fences;

        for (i = 0; i < count; ++i) {
                enum dma_resv_usage old_usage;

                dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
                if ((old->context == fence->context && old_usage >= usage &&
                     dma_fence_is_later_or_same(fence, old)) ||
                    dma_fence_is_signaled(old)) {
                        dma_resv_list_set(fobj, i, fence, usage);
                        dma_fence_put(old);
                        return;
                }
        }

        BUG_ON(fobj->num_fences >= fobj->max_fences);
        count++;

        dma_resv_list_set(fobj, i, fence, usage);
        /* fence update must be visible before we extend the num_fences */
        smp_wmb();
        fobj->num_fences = count;
}
EXPORT_SYMBOL(dma_resv_add_fence);

/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence no longer has access to the
 * resources represented by the dma_resv object when the new fence completes.
 *
 * An example for using this is replacing a preemption fence with a page table
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
                             struct dma_fence *replacement,
                             enum dma_resv_usage usage)
{
        struct dma_resv_list *list;
        unsigned int i;

        dma_resv_assert_held(obj);

        list = dma_resv_fences_list(obj);
        for (i = 0; list && i < list->num_fences; ++i) {
                struct dma_fence *old;

                dma_resv_list_entry(list, i, obj, &old, NULL);
                if (old->context != context)
                        continue;

                dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
                dma_fence_put(old);
        }
}
EXPORT_SYMBOL(dma_resv_replace_fences);

/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
        cursor->index = 0;
        cursor->num_fences = 0;
        cursor->fences = dma_resv_fences_list(cursor->obj);
        if (cursor->fences)
                cursor->num_fences = cursor->fences->num_fences;
        cursor->is_restarted = true;
}

/* Walk to the next not signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
        if (!cursor->fences)
                return;

        do {
                /* Drop the reference from the previous round */
                dma_fence_put(cursor->fence);

                if (cursor->index >= cursor->num_fences) {
                        cursor->fence = NULL;
                        break;
                }

                dma_resv_list_entry(cursor->fences, cursor->index++,
                                    cursor->obj, &cursor->fence,
                                    &cursor->fence_usage);
                cursor->fence = dma_fence_get_rcu(cursor->fence);
                if (!cursor->fence) {
                        dma_resv_iter_restart_unlocked(cursor);
                        continue;
                }

                if (!dma_fence_is_signaled(cursor->fence) &&
                    cursor->usage >= cursor->fence_usage)
                        break;
        } while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
        rcu_read_lock();
        do {
                dma_resv_iter_restart_unlocked(cursor);
                dma_resv_iter_walk_unlocked(cursor);
        } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
        rcu_read_unlock();

        return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
        bool restart;

        rcu_read_lock();
        cursor->is_restarted = false;
        restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
        do {
                if (restart)
                        dma_resv_iter_restart_unlocked(cursor);
                dma_resv_iter_walk_unlocked(cursor);
                restart = true;
        } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
        rcu_read_unlock();

        return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
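
/*
 * Illustrative sketch (hypothetical helper name): any state accumulated
 * across an unlocked walk must be reset when the iterator reports a
 * restart, otherwise fences seen before a concurrent update are counted
 * twice.
 */
#if 0
static unsigned int example_count_fences(struct dma_resv *obj)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        unsigned int count = 0;

        dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                if (dma_resv_iter_is_restarted(&cursor))
                        count = 0;      /* concurrent update, start over */
                count++;
        }
        dma_resv_iter_end(&cursor);

        return count;
}
#endif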

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
        struct dma_fence *fence;

        dma_resv_assert_held(cursor->obj);

        cursor->index = 0;
        cursor->fences = dma_resv_fences_list(cursor->obj);

        fence = dma_resv_iter_next(cursor);
        cursor->is_restarted = true;
        return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
        struct dma_fence *fence;

        dma_resv_assert_held(cursor->obj);

        cursor->is_restarted = false;

        do {
                if (!cursor->fences ||
                    cursor->index >= cursor->fences->num_fences)
                        return NULL;

                dma_resv_list_entry(cursor->fences, cursor->index++,
                                    cursor->obj, &fence, &cursor->fence_usage);
        } while (cursor->fence_usage > cursor->usage);

        return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
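
/*
 * Illustrative sketch (hypothetical helper name): with &dma_resv.lock held
 * the walk is a plain loop with no restart handling, which is why the
 * locked iterator is preferred whenever the lock is already held.
 */
#if 0
static void example_dump_locked(struct dma_resv *obj)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_assert_held(obj);
        dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_BOOKKEEP, fence)
                pr_info("fence context %llu, seqno %llu\n",
                        fence->context, fence->seqno);
}
#endif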

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. The dst lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
        struct dma_resv_iter cursor;
        struct dma_resv_list *list;
        struct dma_fence *f;

        dma_resv_assert_held(dst);

        list = NULL;

        dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
        dma_resv_for_each_fence_unlocked(&cursor, f) {

                if (dma_resv_iter_is_restarted(&cursor)) {
                        dma_resv_list_free(list);

                        list = dma_resv_list_alloc(cursor.num_fences);
                        if (!list) {
                                dma_resv_iter_end(&cursor);
                                return -ENOMEM;
                        }
                        list->num_fences = 0;
                }

                dma_fence_get(f);
                dma_resv_list_set(list, list->num_fences++, f,
                                  dma_resv_iter_usage(&cursor));
        }
        dma_resv_iter_end(&cursor);

        list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
        dma_resv_list_free(list);
        return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's fences without the update side
 * lock held
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
                        unsigned int *num_fences, struct dma_fence ***fences)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        *num_fences = 0;
        *fences = NULL;

        dma_resv_iter_begin(&cursor, obj, usage);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {

                if (dma_resv_iter_is_restarted(&cursor)) {
                        struct dma_fence **new_fences;
                        unsigned int count;

                        while (*num_fences)
                                dma_fence_put((*fences)[--(*num_fences)]);

                        count = cursor.num_fences + 1;

                        /* Eventually re-allocate the array */
                        new_fences = krealloc_array(*fences, count,
                                                    sizeof(void *),
                                                    GFP_KERNEL);
                        if (count && !new_fences) {
                                kfree(*fences);
                                *fences = NULL;
                                *num_fences = 0;
                                dma_resv_iter_end(&cursor);
                                return -ENOMEM;
                        }
                        *fences = new_fences;
                }

                (*fences)[(*num_fences)++] = dma_fence_get(fence);
        }
        dma_resv_iter_end(&cursor);

        return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
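
/*
 * Illustrative sketch (hypothetical helper name): the caller owns both the
 * returned array and the fence references and must drop them.
 */
#if 0
static int example_snapshot(struct dma_resv *obj)
{
        struct dma_fence **fences;
        unsigned int i, count;
        int ret;

        ret = dma_resv_get_fences(obj, DMA_RESV_USAGE_WRITE, &count, &fences);
        if (ret)
                return ret;

        /* ... inspect the snapshot ... */

        for (i = 0; i < count; ++i)
                dma_fence_put(fences[i]);
        kfree(fences);
        return 0;
}
#endif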

/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.
 *
 * Warning: The returned fence must not be added back into the same resv
 * object, since that can lead to stack corruption when finalizing the
 * dma_fence_array.
 *
 * Returns 0 on success and negative error values on failure.
 */
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
                           struct dma_fence **fence)
{
        struct dma_fence_array *array;
        struct dma_fence **fences;
        unsigned count;
        int r;

        r = dma_resv_get_fences(obj, usage, &count, &fences);
        if (r)
                return r;

        if (count == 0) {
                *fence = NULL;
                return 0;
        }

        if (count == 1) {
                *fence = fences[0];
                kfree(fences);
                return 0;
        }

        array = dma_fence_array_create(count, fences,
                                       dma_fence_context_alloc(1),
                                       1, false);
        if (!array) {
                while (count--)
                        dma_fence_put(fences[count]);
                kfree(fences);
                return -ENOMEM;
        }

        *fence = &array->base;
        return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);

/**
 * dma_resv_wait_timeout - Wait on a reservation object's fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
                           bool intr, unsigned long timeout)
{
        long ret = timeout ? timeout : 1;
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, obj, usage);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {

                ret = dma_fence_wait_timeout(fence, intr, timeout);
                if (ret <= 0)
                        break;

                /* Even for zero timeout the return value is 1 */
                if (timeout)
                        timeout = ret;
        }
        dma_resv_iter_end(&cursor);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
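
/*
 * Illustrative sketch (hypothetical helper name) of the return-value
 * convention: negative means error, zero means timeout, positive is the
 * remaining time on success.
 */
#if 0
static int example_wait_100ms(struct dma_resv *obj)
{
        long ret;

        ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_READ, true,
                                    msecs_to_jiffies(100));
        if (ret < 0)
                return ret;             /* e.g. -ERESTARTSYS */
        if (!ret)
                return -ETIMEDOUT;      /* fences still pending */
        return 0;                       /* everything signaled in time */
}
#endif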

/**
 * dma_resv_set_deadline - Set a deadline on a reservation object's fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @deadline: the requested deadline (MONOTONIC)
 *
 * May be called without holding the dma_resv lock. Sets @deadline on
 * all fences filtered by @usage.
 */
void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
                           ktime_t deadline)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, obj, usage);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                dma_fence_set_deadline(fence, deadline);
        }
        dma_resv_iter_end(&cursor);
}
EXPORT_SYMBOL_GPL(dma_resv_set_deadline);

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, obj, usage);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                dma_resv_iter_end(&cursor);
                return false;
        }
        dma_resv_iter_end(&cursor);
        return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
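
/*
 * Illustrative sketch (hypothetical helper name): dma_resv_test_signaled()
 * is the non-blocking counterpart of dma_resv_wait_timeout(), e.g. for
 * implementing poll(). BOOKKEEP usage includes every fence, so this is
 * the strictest idleness test.
 */
#if 0
static bool example_is_idle(struct dma_resv *obj)
{
        return dma_resv_test_signaled(obj, DMA_RESV_USAGE_BOOKKEEP);
}
#endif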

/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
        static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
                seq_printf(seq, "\t%s fence:",
                           usage[dma_resv_iter_usage(&cursor)]);
                dma_fence_describe(fence, seq);
        }
}
EXPORT_SYMBOL_GPL(dma_resv_describe);

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
        struct mm_struct *mm = mm_alloc();
        struct ww_acquire_ctx ctx;
        struct dma_resv obj;
        struct address_space mapping;
        int ret;

        if (!mm)
                return -ENOMEM;

        dma_resv_init(&obj);
        address_space_init_once(&mapping);

        mmap_read_lock(mm);
        ww_acquire_init(&ctx, &reservation_ww_class);
        ret = dma_resv_lock(&obj, &ctx);
        if (ret == -EDEADLK)
                dma_resv_lock_slow(&obj, &ctx);
        fs_reclaim_acquire(GFP_KERNEL);
        /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
        i_mmap_lock_write(&mapping);
        i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        __dma_fence_might_wait();
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
        __dma_fence_might_wait();
#endif
        fs_reclaim_release(GFP_KERNEL);
        ww_mutex_unlock(&obj.lock);
        ww_acquire_fini(&ctx);
        mmap_read_unlock(mm);

        mmput(mm);

        return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif