// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"
#include "trace.h"

struct devres_node {
	struct list_head	entry;
	dr_release_t		release;
	const char		*name;
	size_t			size;
};

struct devres {
	struct devres_node	node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
	 * alignment for struct devres when allocated by kmalloc().
	 */
	u8 __aligned(ARCH_DMA_MINALIGN) data[];
};

struct devres_group {
	struct devres_node	node[2];
	void			*id;
	int			color;
	/* -- 8 pointers */
};

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void devres_dbg(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
			op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define devres_dbg(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	trace_devres_log(dev, op, node, node->name, node->size);
	devres_dbg(dev, node, op);
}

/*
 * Release functions for devres group. These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group *node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	/* Actually allocate the full kmalloc bucket size. */
	*tot_size = kmalloc_size_roundup(*tot_size);

	return true;
}

static __always_inline struct devres *alloc_dr(dr_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	/* No need to clear memory twice */
	if (!(gfp & __GFP_ZERO))
		memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

static void replace_dr(struct device *dev,
		       struct devres_node *old, struct devres_node *new)
{
	devres_log(dev, old, "REPLACE");
	BUG_ON(!list_empty(&new->entry));
	list_replace(&old->entry, &new->entry);
}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes. The allocated area is zeroed, then
 * associated with @release. The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 *	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
					 &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev. @res should have been allocated
 * using devres_alloc(). On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
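
/*
 * Illustrative sketch (not part of the upstream file): how a driver
 * typically pairs devres_alloc() with devres_add(). All foo_* names
 * below are hypothetical.
 *
 *	static void foo_release(struct device *dev, void *res)
 *	{
 *		struct foo_ctx *ctx = res;
 *
 *		foo_hw_shutdown(ctx);
 *	}
 *
 *	static int foo_attach(struct device *dev)
 *	{
 *		struct foo_ctx *ctx;
 *
 *		ctx = devres_alloc(foo_release, sizeof(*ctx), GFP_KERNEL);
 *		if (!ctx)
 *			return -ENOMEM;
 *
 *		if (foo_hw_init(ctx)) {
 *			devres_free(ctx);	// not added yet, free by hand
 *			return -EIO;
 *		}
 *
 *		devres_add(dev, ctx);		// foo_release() runs on detach
 *		return 0;
 *	}
 */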

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1. If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void *devres_find(struct device *dev, dr_release_t release,
		  dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1. If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void *devres_get(struct device *dev, void *new_res,
		 dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
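
/*
 * Illustrative sketch (not part of the upstream file): the usual
 * devres_get() idiom allocates a candidate entry first and lets
 * devres_get() either adopt it or discard it in favor of an existing
 * one, keeping the lookup-or-add step atomic. foo_* names are
 * hypothetical.
 *
 *	struct foo_shared *new, *cur;
 *
 *	new = devres_alloc(foo_shared_release, sizeof(*new), GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *
 *	// frees @new and returns the existing entry if one matches
 *	cur = devres_get(dev, new, NULL, NULL);
 */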

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void *devres_remove(struct device *dev, dr_release_t release,
		    dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed. The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	struct devres_node *node, *n;
	int cnt = 0, nr_groups = 0;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets a
	 * color value of two iff the group is wholly contained in
	 * [current node, end). That is, for a closed group, both opening
	 * and closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update current node or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static void release_nodes(struct device *dev, struct list_head *todo)
{
	struct devres *dr, *tmp;

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;

	/* Nothing to release if list is empty */
	if (list_empty(&dev->devres_head))
		return 0;

	spin_lock_irqsave(&dev->devres_lock, flags);
	cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	release_nodes(dev, &todo);
	return cnt;
}
/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id. For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended. If @id is NULL, an address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;
	grp->color = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/*
 * Find devres group with ID @id. If @id is NULL, look for the latest open
 * group.
 */
static struct devres_group *find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id. If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id. If @id is NULL, the latest
 * open group is selected. Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id. If @id is
 * NULL, the latest open group is selected. The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = remove_nodes(dev, first, end, &todo);
		spin_unlock_irqrestore(&dev->devres_lock, flags);

		release_nodes(dev, &todo);
	} else if (list_empty(&dev->devres_head)) {
		/*
		 * dev is probably dying via devres_release_all(): groups
		 * have already been removed and are in the process of
		 * being released - don't touch and don't warn.
		 */
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
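
/*
 * Illustrative sketch (not part of the upstream file): groups let a
 * driver roll back one whole phase of initialization with a single
 * call. foo_init_phase() is a hypothetical helper that performs any
 * number of devm_* allocations.
 *
 *	if (!devres_open_group(dev, NULL, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	err = foo_init_phase(dev);
 *	if (err) {
 *		devres_release_group(dev, NULL);	// undo just this phase
 *		return err;
 *	}
 *
 *	devres_close_group(dev, NULL);
 */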

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * __devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 * @name: Name of the resource (for debugging purposes)
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name)
{
	struct action_devres *devres;

	devres = __devres_alloc_node(devm_action_release, sizeof(struct action_devres),
				     GFP_KERNEL, NUMA_NO_NODE, name);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(__devm_add_action);
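
/*
 * Illustrative sketch (not part of the upstream file): drivers normally
 * reach this through the devm_add_action()/devm_add_action_or_reset()
 * wrappers rather than calling __devm_add_action() directly. foo_*
 * names are hypothetical.
 *
 *	static void foo_disable(void *data)
 *	{
 *		foo_hw_disable(data);
 *	}
 *
 *	// in probe: undo foo_hw_enable() automatically on detach, and
 *	// run it immediately if the registration itself fails
 *	foo_hw_enable(ctx);
 *	err = devm_add_action_or_reset(dev, foo_disable, ctx);
 *	if (err)
 *		return err;
 */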

bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	return devres_find(dev, devm_action_release, devm_action_match, &devres);
}
EXPORT_SYMBOL_GPL(devm_is_action_added);

/**
 * devm_remove_action_nowarn() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 *
 * In contrast to devm_remove_action(), this function does not WARN() if no
 * entry could have been found.
 *
 * This should only be used if the action is contained in an object with
 * independent lifetime management, e.g. the Devres rust abstraction.
 *
 * Causing the warning from regular driver code most likely indicates an abuse
 * of the devres API.
 *
 * Returns: 0 on success, -ENOENT if no entry could have been found.
 */
int devm_remove_action_nowarn(struct device *dev,
			      void (*action)(void *),
			      void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	return devres_destroy(dev, devm_action_release, devm_action_match,
			      &devres);
}
EXPORT_SYMBOL_GPL(devm_remove_action_nowarn);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action(). Both action and data should match one of the
 * existing entries.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc. Memory allocated with this function is
 * automatically freed on driver detach. Like all other devres
 * resources, the guaranteed alignment is that of unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons.
	 * The initial implementation did not support kmalloc, only kzalloc.
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
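
/*
 * Illustrative sketch (not part of the upstream file): managed
 * allocations remove the kfree() calls from both the probe error paths
 * and the remove path. foo_* names are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		platform_set_drvdata(pdev, priv);
 *		return 0;		// freed automatically on detach
 *	}
 */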

/**
 * devm_krealloc - Resource-managed krealloc()
 * @dev: Device to re-allocate memory for
 * @ptr: Pointer to the memory chunk to re-allocate
 * @new_size: New allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
 * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
 * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the
 * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
 * change the order in which the release callback for the re-alloc'ed devres
 * will be called (except when falling back to devm_kmalloc() or when freeing
 * resources when new_size is zero). The contents of the memory are preserved
 * up to the lesser of new and old sizes.
 */
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
	size_t total_new_size, total_old_size;
	struct devres *old_dr, *new_dr;
	unsigned long flags;

	if (unlikely(!new_size)) {
		devm_kfree(dev, ptr);
		return ZERO_SIZE_PTR;
	}

	if (unlikely(ZERO_OR_NULL_PTR(ptr)))
		return devm_kmalloc(dev, new_size, gfp);

	if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
		/*
		 * We cannot reliably realloc a const string returned by
		 * devm_kstrdup_const().
		 */
		return NULL;

	if (!check_dr_size(new_size, &total_new_size))
		return NULL;

	total_old_size = ksize(container_of(ptr, struct devres, data));
	if (total_old_size == 0) {
		WARN(1, "Pointer doesn't point to dynamically allocated memory.");
		return NULL;
	}

	/*
	 * If new size is smaller or equal to the actual number of bytes
	 * allocated previously - just return the same pointer.
	 */
	if (total_new_size <= total_old_size)
		return ptr;

	/*
	 * Otherwise: allocate new, larger chunk. We need to allocate before
	 * taking the lock as most probably the caller uses GFP_KERNEL.
	 * alloc_dr() will call check_dr_size() to reserve extra memory
	 * for struct devres automatically, so the @new_size the user
	 * requested is passed to it directly, as devm_kmalloc() does.
	 */
	new_dr = alloc_dr(devm_kmalloc_release,
			  new_size, gfp, dev_to_node(dev));
	if (!new_dr)
		return NULL;

	/*
	 * The spinlock protects the linked list against concurrent
	 * modifications but not the resource itself.
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);

	old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
	if (!old_dr) {
		spin_unlock_irqrestore(&dev->devres_lock, flags);
		kfree(new_dr);
		WARN(1, "Memory chunk not managed or managed by a different device.");
		return NULL;
	}

	replace_dr(dev, &old_dr->node, &new_dr->node);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/*
	 * We can copy the memory contents after releasing the lock as we're
	 * no longer modifying the list links.
	 */
	memcpy(new_dr->data, old_dr->data,
	       total_old_size - offsetof(struct devres, data));
	/*
	 * Same for releasing the old devres - it's now been removed from the
	 * list. This is also the reason why we must not use devm_kfree() - the
	 * links are no longer valid.
	 */
	kfree(old_dr);

	return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);

/**
 * devm_kstrdup - Allocate resource managed space and
 *		  copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *	 allocating memory
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	if (!s)
		return NULL;

	return devm_kmemdup(dev, s, strlen(s) + 1, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in .rodata section, otherwise it falls back to
 * devm_kstrdup.
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *	 allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len + 1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len + 1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *	 allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
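
/*
 * Illustrative sketch (not part of the upstream file): a typical use
 * is building a managed name string. priv->irq_name and index are
 * hypothetical.
 *
 *	priv->irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq%d",
 *					dev_name(dev), index);
 *	if (!priv->irq_name)
 *		return -ENOMEM;
 */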

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special cases: pointer to a string in .rodata returned by
	 * devm_kstrdup_const() or a NULL/ZERO ptr.
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a region of memory using resource-managed kmalloc.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages. Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
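
/*
 * Illustrative sketch (not part of the upstream file): callers usually
 * go through the devm_alloc_percpu() macro, which derives size and
 * alignment from the type. foo_stats is hypothetical.
 *
 *	struct foo_stats __percpu *stats;
 *
 *	stats = devm_alloc_percpu(dev, struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(stats->rx_packets);
 */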

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	/*
	 * Use devres_release() to prevent memory leakage as
	 * devm_free_pages() does.
	 */
	WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
			       (void *)(__force unsigned long)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);