GitHub Repository: torvalds/linux
Path: blob/master/drivers/base/core.c

// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/core.c - core driver model code (device registration, etc)
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2006 Greg Kroah-Hartman <[email protected]>
 * Copyright (c) 2006 Novell, Inc.
 */

#include <linux/acpi.h>
#include <linux/blkdev.h>
#include <linux/cleanup.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/swiotlb.h>
#include <linux/sysfs.h>

#include "base.h"
#include "physical_location.h"
#include "power/power.h"

/* Device links support. */
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
static void __fw_devlink_link_to_consumers(struct device *dev);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;
static struct workqueue_struct *device_link_wq;

/**
 * __fwnode_link_add - Create a link between two fwnode_handles.
 * @con: Consumer end of the link.
 * @sup: Supplier end of the link.
 * @flags: Link flags.
 *
 * Create a fwnode link between fwnode handles @con and @sup. The fwnode link
 * represents the fact that the firmware lists @sup fwnode as supplying a
 * resource to @con.
 *
 * The driver core will use the fwnode link to create a device link between the
 * two device objects corresponding to @con and @sup when they are created. The
 * driver core will automatically delete the fwnode link between @con and @sup
 * after doing that.
 *
 * Attempts to create duplicate links between the same pair of fwnode handles
 * are ignored and there is no reference counting.
 */
static int __fwnode_link_add(struct fwnode_handle *con,
			     struct fwnode_handle *sup, u8 flags)
{
	struct fwnode_link *link;

	list_for_each_entry(link, &sup->consumers, s_hook)
		if (link->consumer == con) {
			link->flags |= flags;
			return 0;
		}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->supplier = sup;
	INIT_LIST_HEAD(&link->s_hook);
	link->consumer = con;
	INIT_LIST_HEAD(&link->c_hook);
	link->flags = flags;

	list_add(&link->s_hook, &sup->consumers);
	list_add(&link->c_hook, &con->suppliers);
	pr_debug("%pfwf Linked as a fwnode consumer to %pfwf\n",
		 con, sup);

	return 0;
}

int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
		    u8 flags)
{
	guard(mutex)(&fwnode_link_lock);

	return __fwnode_link_add(con, sup, flags);
}
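
/*
 * Example (an illustrative sketch only; the fwnode pointers are
 * placeholders): firmware parsing code that discovers "con depends on sup"
 * would record the dependency like this:
 *
 *	struct fwnode_handle *con_fwnode, *sup_fwnode;
 *	int err;
 *
 *	err = fwnode_link_add(con_fwnode, sup_fwnode, 0);
 *	if (err)
 *		return err;
 *
 * The driver core later converts such fwnode links into device links once
 * both devices exist.
 */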

/**
 * __fwnode_link_del - Delete a link between two fwnode_handles.
 * @link: the fwnode_link to be deleted
 *
 * The fwnode_link_lock needs to be held when this function is called.
 */
static void __fwnode_link_del(struct fwnode_link *link)
{
	pr_debug("%pfwf Dropping the fwnode link to %pfwf\n",
		 link->consumer, link->supplier);
	list_del(&link->s_hook);
	list_del(&link->c_hook);
	kfree(link);
}

/**
 * __fwnode_link_cycle - Mark a fwnode link as being part of a cycle.
 * @link: the fwnode_link to be marked
 *
 * The fwnode_link_lock needs to be held when this function is called.
 */
static void __fwnode_link_cycle(struct fwnode_link *link)
{
	pr_debug("%pfwf: cycle: depends on %pfwf\n",
		 link->consumer, link->supplier);
	link->flags |= FWLINK_FLAG_CYCLE;
}

/**
 * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
 * @fwnode: fwnode whose supplier links need to be deleted
 *
 * Deletes all supplier links connecting directly to @fwnode.
 */
static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_link *link, *tmp;

	guard(mutex)(&fwnode_link_lock);

	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
		__fwnode_link_del(link);
}

/**
 * fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle.
 * @fwnode: fwnode whose consumer links need to be deleted
 *
 * Deletes all consumer links connecting directly to @fwnode.
 */
static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
	struct fwnode_link *link, *tmp;

	guard(mutex)(&fwnode_link_lock);

	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
		__fwnode_link_del(link);
}

/**
 * fwnode_links_purge - Delete all links connected to a fwnode_handle.
 * @fwnode: fwnode whose links need to be deleted
 *
 * Deletes all links connecting directly to a fwnode.
 */
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
	fwnode_links_purge_suppliers(fwnode);
	fwnode_links_purge_consumers(fwnode);
}

void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;

	/* Don't purge consumer links of an added child */
	if (fwnode->dev)
		return;

	fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
	fwnode_links_purge_consumers(fwnode);

	fwnode_for_each_available_child_node(fwnode, child)
		fw_devlink_purge_absent_suppliers(child);
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);

/**
 * __fwnode_links_move_consumers - Move consumer from @from to @to fwnode_handle
 * @from: move consumers away from this fwnode
 * @to: move consumers to this fwnode
 *
 * Move all consumer links from @from fwnode to @to fwnode.
 */
static void __fwnode_links_move_consumers(struct fwnode_handle *from,
					  struct fwnode_handle *to)
{
	struct fwnode_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &from->consumers, s_hook) {
		__fwnode_link_add(link->consumer, to, link->flags);
		__fwnode_link_del(link);
	}
}

/**
 * __fw_devlink_pickup_dangling_consumers - Pick up dangling consumers
 * @fwnode: fwnode from which to pick up dangling consumers
 * @new_sup: fwnode of new supplier
 *
 * If the @fwnode has a corresponding struct device and the device supports
 * probing (that is, added to a bus), then we want to let fw_devlink create
 * MANAGED device links to this device, so leave @fwnode and its
 * descendants' fwnode links alone.
 *
 * Otherwise, move its consumers to the new supplier @new_sup.
 */
static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode,
						   struct fwnode_handle *new_sup)
{
	struct fwnode_handle *child;

	if (fwnode->dev && fwnode->dev->bus)
		return;

	fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
	__fwnode_links_move_consumers(fwnode, new_sup);

	fwnode_for_each_available_child_node(fwnode, child)
		__fw_devlink_pickup_dangling_consumers(child, new_sup);
}

static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}

static void device_link_synchronize_removal(void)
{
	synchronize_srcu(&device_links_srcu);
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
}

static bool device_is_ancestor(struct device *dev, struct device *target)
{
	while (target->parent) {
		target = target->parent;
		if (dev == target)
			return true;
	}
	return false;
}

#define DL_MARKER_FLAGS		(DL_FLAG_INFERRED | \
				 DL_FLAG_CYCLE | \
				 DL_FLAG_MANAGED)
static inline bool device_link_flag_is_sync_state_only(u32 flags)
{
	return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}

/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child or
 * its consumer etc). Return 1 if that is the case or 0 otherwise.
 */
static int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	/*
	 * The "ancestors" check is needed to catch the case when the target
	 * device has not been completely initialized yet and it is still
	 * missing from the list of children of its parent device.
	 */
	if (dev == target || device_is_ancestor(dev, target))
		return 1;

	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (device_link_flag_is_sync_state_only(link->flags))
			continue;

		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}

static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probing yet as long as it
			 * knows that the supplier is already functional (for
			 * example, it has just acquired some resources from the
			 * supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}

static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (device_link_flag_is_sync_state_only(link->flags))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}

/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the device_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}

#define to_devlink(dev)	container_of((dev), struct device_link, link_dev)

static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	const char *output;

	switch (to_devlink(dev)->status) {
	case DL_STATE_NONE:
		output = "not tracked";
		break;
	case DL_STATE_DORMANT:
		output = "dormant";
		break;
	case DL_STATE_AVAILABLE:
		output = "available";
		break;
	case DL_STATE_CONSUMER_PROBE:
		output = "consumer probing";
		break;
	case DL_STATE_ACTIVE:
		output = "active";
		break;
	case DL_STATE_SUPPLIER_UNBIND:
		output = "supplier unbinding";
		break;
	default:
		output = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(status);

static ssize_t auto_remove_on_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);
	const char *output;

	if (device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
		output = "supplier unbind";
	else if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER))
		output = "consumer unbind";
	else
		output = "never";

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(auto_remove_on);

static ssize_t runtime_pm_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);

	return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);

static ssize_t sync_state_only_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);

	return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);

static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);
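
/*
 * Each device link is exposed as a device in the "devlink" class, so the
 * attributes above can be read from userspace. A hypothetical example
 * (device names made up, following the "%s:%s--%s:%s" naming scheme used
 * in device_link_add() below):
 *
 *	$ cat /sys/class/devlink/platform:foo--platform:bar/status
 *	active
 */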

static void device_link_release_fn(struct work_struct *work)
{
	struct device_link *link = container_of(work, struct device_link, rm_work);

	/* Ensure that all references to the link object have been dropped. */
	device_link_synchronize_removal();

	pm_runtime_release_supplier(link);
	/*
	 * If supplier_preactivated is set, the link has been dropped between
	 * the pm_runtime_get_suppliers() and pm_runtime_put_suppliers() calls
	 * in __driver_probe_device(). In that case, drop the supplier's
	 * PM-runtime usage counter to remove the reference taken by
	 * pm_runtime_get_suppliers().
	 */
	if (link->supplier_preactivated)
		pm_runtime_put_noidle(link->supplier);

	pm_request_idle(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}

static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	INIT_WORK(&link->rm_work, device_link_release_fn);
	/*
	 * It may take a while to complete this work because of the SRCU
	 * synchronization in device_link_release_fn() and if the consumer or
	 * supplier devices get deleted when it runs, so put it into the
	 * dedicated workqueue.
	 */
	queue_work(device_link_wq, &link->rm_work);
}

/**
 * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
 */
void device_link_wait_removal(void)
{
	/*
	 * devlink removal jobs are queued in the dedicated work queue.
	 * To be sure that all removal jobs are terminated, ensure that any
	 * scheduled work has run to completion.
	 */
	flush_workqueue(device_link_wq);
}
EXPORT_SYMBOL_GPL(device_link_wait_removal);

static const struct class devlink_class = {
	.name = "devlink",
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};

static int devlink_add_symlinks(struct device *dev)
{
	char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
	int ret;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	if (!buf_con) {
		ret = -ENOMEM;
		goto err_con_dev;
	}

	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf_con);
	if (ret)
		goto err_con_dev;

	buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
	if (!buf_sup) {
		ret = -ENOMEM;
		goto err_sup_dev;
	}

	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf_sup);
	if (ret)
		goto err_sup_dev;

	goto out;

err_sup_dev:
	sysfs_remove_link(&sup->kobj, buf_con);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	return ret;
}
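
/*
 * For a hypothetical supplier "platform:foo" and consumer "platform:bar",
 * the symlinks created above would be:
 *
 *	/sys/class/devlink/platform:foo--platform:bar/supplier -> foo's directory
 *	/sys/class/devlink/platform:foo--platform:bar/consumer -> bar's directory
 *	in foo's directory: consumer:platform:bar -> the devlink device
 *	in bar's directory: supplier:platform:foo -> the devlink device
 */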

static void devlink_remove_symlinks(struct device *dev)
{
	char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	if (device_is_registered(con)) {
		buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
		if (!buf_sup)
			goto out;
		sysfs_remove_link(&con->kobj, buf_sup);
	}

	buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	if (!buf_con)
		goto out;
	sysfs_remove_link(&sup->kobj, buf_con);

	return;

out:
	WARN(1, "Unable to properly free device link symlinks!\n");
}

static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};

static int __init devlink_class_init(void)
{
	int ret;

	ret = class_register(&devlink_class);
	if (ret)
		return ret;

	ret = class_interface_register(&devlink_class_intf);
	if (ret)
		class_unregister(&devlink_class);

	return ret;
}
postcore_initcall(devlink_class_init);

#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER | \
			       DL_FLAG_SYNC_STATE_ONLY | \
			       DL_FLAG_INFERRED | \
			       DL_FLAG_CYCLE)

#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)

/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * Return: On success, a device_link struct will be returned.
 * On error or invalid flag settings, NULL will be returned.
 *
 * The caller is responsible for the proper synchronization of the link creation
 * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
 * runtime PM framework to take the link into account. Second, if the
 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
 * be forced into the active meta state and reference-counted upon the creation
 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
 * ignored.
 *
 * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
 * expected to release the link returned by it directly with the help of either
 * device_link_del() or device_link_remove().
 *
 * If that flag is not set, however, the caller of this function is handing the
 * management of the link over to the driver core entirely and its return value
 * can only be used to check whether or not the link is present. In that case,
 * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
 * flags can be used to indicate to the driver core when the link can be safely
 * deleted. Namely, setting one of them in @flags indicates to the driver core
 * that the link is not going to be used (by the given caller of this function)
 * after unbinding the consumer or supplier driver, respectively, from its
 * device, so the link can be deleted at that point. If none of them is set,
 * the link will be maintained until one of the devices pointed to by it (either
 * the consumer or the supplier) is unregistered.
 *
 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
 * be used to request the driver core to automatically probe for a consumer
 * driver after successfully binding a driver to the supplier device.
 *
 * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
 * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
 * the same time is invalid and will cause NULL to be returned upfront.
 * However, if a device link between the given @consumer and @supplier pair
 * exists already when this function is called for them, the existing link will
 * be returned regardless of its current type and status (the link's flags may
 * be modified then). The caller of this function is then expected to treat
 * the link as though it has just been created, so (in particular) if
 * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
 * explicitly when not needed any more (as stated above).
 *
 * A side effect of the link creation is re-ordering of dpm_list and the
 * devices_kset list by moving the consumer device and all devices depending
 * on it to the ends of these lists (that does not happen to devices that have
 * not been registered when this function is called).
 *
 * The supplier device is required to be registered when this function is called
 * and NULL will be returned if that is not the case. The consumer device need
 * not be registered, however.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	if (!consumer || !supplier || consumer == supplier ||
	    flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
	    !device_link_flag_is_sync_state_only(flags))
		return NULL;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
	 * the supplier already in the graph, return NULL. If the link is a
	 * SYNC_STATE_ONLY link, we don't check for reverse dependencies
	 * because it only affects sync_state() callbacks.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * SYNC_STATE_ONLY links are useless once a consumer device has probed.
	 * So, only create it if the consumer hasn't probed yet.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
	    consumer->links.status != DL_DEV_NO_DRIVER &&
	    consumer->links.status != DL_DEV_PROBING) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
	 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		if (device_link_test(link, DL_FLAG_INFERRED) &&
		    !(flags & DL_FLAG_INFERRED))
			link->flags &= ~DL_FLAG_INFERRED;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
			    !device_link_test(link, DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the lifetime of the link implied by the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		if (!device_link_test(link, DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s:%s--%s:%s",
		     dev_bus_name(supplier), dev_name(supplier),
		     dev_bus_name(consumer), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		put_device(&link->link_dev);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Some callers expect the link creation during consumer driver probe to
	 * resume the supplier even without DL_FLAG_RPM_ACTIVE.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
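
/*
 * For instance, a managed link created from a hypothetical consumer
 * driver's probe path (error handling trimmed) could look like:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct device *supplier;	// found elsewhere, e.g. via its fwnode
 *		struct device_link *link;
 *
 *		link = device_link_add(&pdev->dev, supplier,
 *				       DL_FLAG_AUTOREMOVE_CONSUMER |
 *				       DL_FLAG_PM_RUNTIME);
 *		if (!link)
 *			return -EINVAL;
 *
 *		return 0;	// the core drops the link when the consumer unbinds
 *	}
 */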

static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_dbg(link->consumer, "Dropping the link to %s\n",
		dev_name(link->supplier));

	pm_runtime_drop_link(link);

	device_link_remove_from_lists(link);
	device_unregister(&link->link_dev);
}

static void device_link_put_kref(struct device_link *link)
{
	if (device_link_test(link, DL_FLAG_STATELESS))
		kref_put(&link->kref, __device_link_del);
	else if (!device_is_registered(link->consumer))
		__device_link_del(&link->kref);
	else
		WARN(1, "Unable to drop a managed device link reference\n");
}

/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM. If the link was added multiple times, it needs to be deleted as often.
 * Care is required for hotplugged devices: Their links are purged on removal
 * and calling device_link_del() is then no longer allowed.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);

/**
 * device_link_remove - Delete a stateless link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.
 */
void device_link_remove(void *consumer, struct device *supplier)
{
	struct device_link *link;

	if (WARN_ON(consumer == supplier))
		return;

	device_links_write_lock();

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer == consumer) {
			device_link_put_kref(link);
			break;
		}
	}

	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);
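
/*
 * A stateless link, by contrast, stays under the caller's control. A rough
 * sketch (hypothetical "con" and "sup" device pointers):
 *
 *	struct device_link *link = device_link_add(con, sup, DL_FLAG_STATELESS);
 *
 *	if (!link)
 *		return -EINVAL;
 *	...
 *	device_link_del(link);		// or device_link_remove(con, sup),
 *					// which looks the link up by its ends
 */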

static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->status != DL_STATE_CONSUMER_PROBE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}
}

static bool dev_is_best_effort(struct device *dev)
{
	return (fw_devlink_best_effort && dev->can_match) ||
		(dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
}

static struct fwnode_handle *fwnode_links_check_suppliers(
						struct fwnode_handle *fwnode)
{
	struct fwnode_link *link;

	if (!fwnode || fw_devlink_is_permissive())
		return NULL;

	list_for_each_entry(link, &fwnode->suppliers, c_hook)
		if (!(link->flags &
		      (FWLINK_FLAG_CYCLE | FWLINK_FLAG_IGNORE)))
			return link->supplier;

	return NULL;
}

/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers. Walk the list of the device's
 * links to suppliers and see if all of them are available. If not, simply
 * return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here. It can only go away in __device_release_driver(), and
1044
* that function checks the device's links to consumers. This means we need to
1045
* mark the link as "consumer probe in progress" to make the supplier removal
1046
* wait for us to complete (or bad things may happen).
1047
*
1048
* Links without the DL_FLAG_MANAGED flag set are ignored.
1049
*/
1050
int device_links_check_suppliers(struct device *dev)
1051
{
1052
struct device_link *link;
1053
int ret = 0, fwnode_ret = 0;
1054
struct fwnode_handle *sup_fw;
1055
1056
/*
1057
	 * A device waiting for its supplier to become available is not
	 * allowed to probe.
1059
*/
1060
scoped_guard(mutex, &fwnode_link_lock) {
1061
sup_fw = fwnode_links_check_suppliers(dev->fwnode);
1062
if (sup_fw) {
1063
if (dev_is_best_effort(dev))
1064
fwnode_ret = -EAGAIN;
1065
else
1066
return dev_err_probe(dev, -EPROBE_DEFER,
1067
"wait for supplier %pfwf\n", sup_fw);
1068
}
1069
}
1070
1071
device_links_write_lock();
1072
1073
list_for_each_entry(link, &dev->links.suppliers, c_node) {
1074
if (!device_link_test(link, DL_FLAG_MANAGED))
1075
continue;
1076
1077
if (link->status != DL_STATE_AVAILABLE &&
1078
!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
1079
1080
if (dev_is_best_effort(dev) &&
1081
device_link_test(link, DL_FLAG_INFERRED) &&
1082
!link->supplier->can_match) {
1083
ret = -EAGAIN;
1084
continue;
1085
}
1086
1087
device_links_missing_supplier(dev);
1088
ret = dev_err_probe(dev, -EPROBE_DEFER,
1089
"supplier %s not ready\n", dev_name(link->supplier));
1090
break;
1091
}
1092
WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
1093
}
1094
dev->links.status = DL_DEV_PROBING;
1095
1096
device_links_write_unlock();
1097
1098
return ret ? ret : fwnode_ret;
1099
}
1100
1101
/**
1102
* __device_links_queue_sync_state - Queue a device for sync_state() callback
1103
* @dev: Device to call sync_state() on
1104
* @list: List head to queue the @dev on
1105
*
1106
* Queues a device for a sync_state() callback when the device links write lock
1107
* isn't held. This allows the sync_state() execution flow to use device links
1108
* APIs. The caller must ensure this function is called with
1109
* device_links_write_lock() held.
1110
*
1111
* This function does a get_device() to make sure the device is not freed while
1112
* on this list.
1113
*
1114
* So the caller must also ensure that device_links_flush_sync_list() is called
1115
* as soon as the caller releases device_links_write_lock(). This is necessary
1116
* to make sure the sync_state() is called in a timely fashion and the
1117
* put_device() is called on this device.
1118
*/
1119
static void __device_links_queue_sync_state(struct device *dev,
1120
struct list_head *list)
1121
{
1122
struct device_link *link;
1123
1124
if (!dev_has_sync_state(dev))
1125
return;
1126
if (dev->state_synced)
1127
return;
1128
1129
list_for_each_entry(link, &dev->links.consumers, s_node) {
1130
if (!device_link_test(link, DL_FLAG_MANAGED))
1131
continue;
1132
if (link->status != DL_STATE_ACTIVE)
1133
return;
1134
}
1135
1136
/*
1137
* Set the flag here to avoid adding the same device to a list more
1138
* than once. This can happen if new consumers get added to the device
1139
* and probed before the list is flushed.
1140
*/
1141
dev->state_synced = true;
1142
1143
if (WARN_ON(!list_empty(&dev->links.defer_sync)))
1144
return;
1145
1146
get_device(dev);
1147
list_add_tail(&dev->links.defer_sync, list);
1148
}
1149
1150
/**
1151
* device_links_flush_sync_list - Call sync_state() on a list of devices
1152
* @list: List of devices to call sync_state() on
1153
* @dont_lock_dev: Device for which lock is already held by the caller
1154
*
1155
* Calls sync_state() on all the devices that have been queued for it. This
1156
* function is used in conjunction with __device_links_queue_sync_state(). The
1157
* @dont_lock_dev parameter is useful when this function is called from a
1158
* context where a device lock is already held.
1159
*/
1160
static void device_links_flush_sync_list(struct list_head *list,
1161
struct device *dont_lock_dev)
1162
{
1163
struct device *dev, *tmp;
1164
1165
list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
1166
list_del_init(&dev->links.defer_sync);
1167
1168
if (dev != dont_lock_dev)
1169
device_lock(dev);
1170
1171
dev_sync_state(dev);
1172
1173
if (dev != dont_lock_dev)
1174
device_unlock(dev);
1175
1176
put_device(dev);
1177
}
1178
}
1179
1180
void device_links_supplier_sync_state_pause(void)
1181
{
1182
device_links_write_lock();
1183
defer_sync_state_count++;
1184
device_links_write_unlock();
1185
}
1186
1187
void device_links_supplier_sync_state_resume(void)
1188
{
1189
struct device *dev, *tmp;
1190
LIST_HEAD(sync_list);
1191
1192
device_links_write_lock();
1193
if (!defer_sync_state_count) {
1194
WARN(true, "Unmatched sync_state pause/resume!");
1195
goto out;
1196
}
1197
defer_sync_state_count--;
1198
if (defer_sync_state_count)
1199
goto out;
1200
1201
list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
1202
/*
1203
* Delete from deferred_sync list before queuing it to
1204
* sync_list because defer_sync is used for both lists.
1205
*/
1206
list_del_init(&dev->links.defer_sync);
1207
__device_links_queue_sync_state(dev, &sync_list);
1208
}
1209
out:
1210
device_links_write_unlock();
1211
1212
device_links_flush_sync_list(&sync_list, NULL);
1213
}
1214
1215
static int sync_state_resume_initcall(void)
1216
{
1217
device_links_supplier_sync_state_resume();
1218
return 0;
1219
}
1220
late_initcall(sync_state_resume_initcall);
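
/*
 * The pause/resume pair above nests, and sync_state() callbacks stay
 * deferred until every pause has been matched by a resume (the count starts
 * at 1 and the late_initcall above drops that initial reference). A caller
 * registering a batch of devices might, roughly, do:
 *
 *	device_links_supplier_sync_state_pause();
 *	// ... register a batch of devices ...
 *	device_links_supplier_sync_state_resume();	// may flush sync_state()
 */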

static void __device_links_supplier_defer_sync(struct device *sup)
{
	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
		list_add_tail(&sup->links.defer_sync, &deferred_sync);
}

static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}

static ssize_t waiting_for_supplier_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	bool val;

	device_lock(dev);
	scoped_guard(mutex, &fwnode_link_lock)
		val = !!fwnode_links_check_suppliers(dev->fwnode);
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RO(waiting_for_supplier);

/**
 * device_links_force_bind - Prepares device to be force bound
 * @dev: Consumer device.
 *
 * device_bind_driver() force binds a device to a driver without calling any
 * driver probe functions. So the consumer really isn't going to wait for any
 * supplier before it's bound to the driver. We still want the device link
 * states to be sensible when this happens.
 *
 * In preparation for device_bind_driver(), this function goes through each
 * of the device's supplier links and checks if the supplier is bound. If it is,
 * the device link status is set to CONSUMER_PROBE. Otherwise, the device link
 * is dropped. Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_force_bind(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		if (!device_link_test(link, DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE) {
			device_link_drop_managed(link);
			continue;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
}

/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".
 *
 * Also change the status of @dev's links to suppliers to "active".
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link, *ln;
	LIST_HEAD(sync_list);

	/*
	 * If a device binds successfully, it's expected to have created all
	 * the device links it needs to or make new device links as it needs
	 * them. So, fw_devlink no longer needs to create device links to any
	 * of the device's suppliers.
	 *
	 * Also, if a child firmware node of this bound device is not added as a
	 * device by now, assume it is never going to be added. Make this bound
	 * device the fallback supplier to the dangling consumers of the child
	 * firmware node because this bound device is probably implementing the
	 * child firmware node functionality and we don't want the dangling
	 * consumers to defer probe indefinitely waiting for a device for the
	 * child firmware node.
	 */
	if (dev->fwnode && dev->fwnode->dev == dev) {
		struct fwnode_handle *child;

		fwnode_links_purge_suppliers(dev->fwnode);

		guard(mutex)(&fwnode_link_lock);

		fwnode_for_each_available_child_node(dev->fwnode, child)
			__fw_devlink_pickup_dangling_consumers(child,
							       dev->fwnode);
		__fw_devlink_link_to_consumers(dev);
	}
	device_remove_file(dev, &dev_attr_waiting_for_supplier);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!device_link_test(link, DL_FLAG_MANAGED))
			continue;

		/*
		 * Links created during consumer probe may be in the "consumer
		 * probe" state to start with if the supplier is still probing
		 * when they are created and they may become "active" if the
		 * consumer probe returns first. Skip them here.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		if (device_link_test(link, DL_FLAG_AUTOPROBE_CONSUMER))
			driver_deferred_probe_add(link->consumer);
	}

	if (defer_sync_state_count)
		__device_links_supplier_defer_sync(dev);
	else
		__device_links_queue_sync_state(dev, &sync_list);

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		struct device *supplier;

		if (!device_link_test(link, DL_FLAG_MANAGED))
			continue;

		supplier = link->supplier;
		if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
			/*
			 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
			 * other DL_MANAGED_LINK_FLAGS have been set. So, it's
			 * safe to drop the managed link completely.
			 */
			device_link_drop_managed(link);
		} else if (dev_is_best_effort(dev) &&
			   device_link_test(link, DL_FLAG_INFERRED) &&
			   link->status != DL_STATE_CONSUMER_PROBE &&
			   !link->supplier->can_match) {
			/*
			 * When dev_is_best_effort() is true, we ignore device
			 * links to suppliers that don't have a driver. If the
			 * consumer device still managed to probe, there's no
			 * point in maintaining a device link in a weird state
			 * (consumer probed before supplier). So delete it.
			 */
			device_link_drop_managed(link);
		} else {
			WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
			WRITE_ONCE(link->status, DL_STATE_ACTIVE);
		}

		/*
		 * This needs to be done even for the deleted
		 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
		 * device link that was preventing the supplier from getting a
		 * sync_state() call.
		 */
		if (defer_sync_state_count)
			__device_links_supplier_defer_sync(supplier);
		else
			__device_links_queue_sync_state(supplier, &sync_list);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();

	device_links_flush_sync_list(&sync_list, dev);
}

/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (!device_link_test(link, DL_FLAG_MANAGED))
			continue;

		if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
			device_link_drop_managed(link);
			continue;
		}

		if (link->status != DL_STATE_CONSUMER_PROBE &&
		    link->status != DL_STATE_ACTIVE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}

/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver has just failed to probe.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!device_link_test(link, DL_FLAG_MANAGED))
			continue;

		/*
		 * The probe has failed, so if the status of the link is
		 * "consumer probe" or "active", it must have been added by
		 * a probing consumer while this device was still probing.
		 * Change its state to "dormant", as it represents a valid
		 * relationship, but it is not functionally meaningful.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}

/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant" and
 * invoke %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
		if (!device_link_test(link, DL_FLAG_MANAGED))
			continue;

		WARN_ON(device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER));
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

		/*
		 * autoremove the links between this @dev and its consumer
		 * devices that are not active, i.e. where the link state
		 * has moved to DL_STATE_SUPPLIER_UNBIND.
		 */
		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
		    device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
			device_link_drop_managed(link);

		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	list_del_init(&dev->links.defer_sync);
	__device_links_no_driver(dev);

	device_links_write_unlock();
}

/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each consumer of the device and return 'true' if its link's status
 * is one of "consumer probe" or "active" (meaning that the given consumer is
 * probing right now or its driver is present). Otherwise, change the link
 * state to "supplier unbind" to prevent the consumer from being probed
 * successfully going forward.
 *
 * Return 'false' if there are no probing or active consumers.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!device_link_test(link, DL_FLAG_MANAGED))
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}

/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev and if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to complete
 * and start over.
 *
 * If that's not the case, change the status of the link to "supplier unbind"
 * and check if the link was in the "active" state. If so, force the consumer
 * driver to unbind and start over (the consumer will not re-probe as we have
 * changed the state of the link already).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (!device_link_test(link, DL_FLAG_MANAGED) ||
		    device_link_test(link, DL_FLAG_SYNC_STATE_ONLY))
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}

/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	if (dev->class == &devlink_class)
		return;

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}

#define FW_DEVLINK_FLAGS_PERMISSIVE	(DL_FLAG_INFERRED | \
					 DL_FLAG_SYNC_STATE_ONLY)
#define FW_DEVLINK_FLAGS_ON		(DL_FLAG_INFERRED | \
					 DL_FLAG_AUTOPROBE_CONSUMER)
#define FW_DEVLINK_FLAGS_RPM		(FW_DEVLINK_FLAGS_ON | \
					 DL_FLAG_PM_RUNTIME)

static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
static int __init fw_devlink_setup(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcmp(arg, "off") == 0) {
		fw_devlink_flags = 0;
	} else if (strcmp(arg, "permissive") == 0) {
		fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
	} else if (strcmp(arg, "on") == 0) {
		fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
	} else if (strcmp(arg, "rpm") == 0) {
		fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
	}
	return 0;
}
early_param("fw_devlink", fw_devlink_setup);
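
/*
 * On the kernel command line, the recognized values (handled above) are:
 *
 *	fw_devlink=off		// don't create device links from firmware info
 *	fw_devlink=permissive	// SYNC_STATE_ONLY links; never blocks probing
 *	fw_devlink=on		// enforce dependencies for probe ordering
 *	fw_devlink=rpm		// like "on", plus runtime PM (the default)
 */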

static bool fw_devlink_strict;
static int __init fw_devlink_strict_setup(char *arg)
{
	return kstrtobool(arg, &fw_devlink_strict);
}
early_param("fw_devlink.strict", fw_devlink_strict_setup);

#define FW_DEVLINK_SYNC_STATE_STRICT	0
#define FW_DEVLINK_SYNC_STATE_TIMEOUT	1

#ifndef CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT
static int fw_devlink_sync_state;
#else
static int fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT;
#endif

static int __init fw_devlink_sync_state_setup(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcmp(arg, "strict") == 0) {
		fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_STRICT;
		return 0;
	} else if (strcmp(arg, "timeout") == 0) {
		fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT;
		return 0;
	}
	return -EINVAL;
}
early_param("fw_devlink.sync_state", fw_devlink_sync_state_setup);
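
/*
 * Likewise, fw_devlink.sync_state accepts exactly two values:
 *
 *	fw_devlink.sync_state=strict	// only warn about pending sync_state()
 *	fw_devlink.sync_state=timeout	// force sync_state() once probing is done
 *
 * See fw_devlink_dev_sync_state() below for how each mode is applied.
 */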
1705
1706
static inline u32 fw_devlink_get_flags(u8 fwlink_flags)
1707
{
1708
if (fwlink_flags & FWLINK_FLAG_CYCLE)
1709
return FW_DEVLINK_FLAGS_PERMISSIVE | DL_FLAG_CYCLE;
1710
1711
return fw_devlink_flags;
1712
}
1713
1714
static bool fw_devlink_is_permissive(void)
1715
{
1716
return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
1717
}
1718
1719
bool fw_devlink_is_strict(void)
1720
{
1721
return fw_devlink_strict && !fw_devlink_is_permissive();
1722
}
1723
1724
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
1725
{
1726
if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
1727
return;
1728
1729
fwnode_call_int_op(fwnode, add_links);
1730
fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
1731
}
1732
1733
static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
1734
{
1735
struct fwnode_handle *child = NULL;
1736
1737
fw_devlink_parse_fwnode(fwnode);
1738
1739
while ((child = fwnode_get_next_available_child_node(fwnode, child)))
1740
fw_devlink_parse_fwtree(child);
1741
}
1742
1743
static void fw_devlink_relax_link(struct device_link *link)
1744
{
1745
if (!device_link_test(link, DL_FLAG_INFERRED))
1746
return;
1747
1748
if (device_link_flag_is_sync_state_only(link->flags))
1749
return;
1750
1751
pm_runtime_drop_link(link);
1752
link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
1753
dev_dbg(link->consumer, "Relaxing link with %s\n",
1754
dev_name(link->supplier));
1755
}
1756
1757
static int fw_devlink_no_driver(struct device *dev, void *data)
1758
{
1759
struct device_link *link = to_devlink(dev);
1760
1761
if (!link->supplier->can_match)
1762
fw_devlink_relax_link(link);
1763
1764
return 0;
1765
}
1766
1767
void fw_devlink_drivers_done(void)
1768
{
1769
fw_devlink_drv_reg_done = true;
1770
device_links_write_lock();
1771
class_for_each_device(&devlink_class, NULL, NULL,
1772
fw_devlink_no_driver);
1773
device_links_write_unlock();
1774
}

static int fw_devlink_dev_sync_state(struct device *dev, void *data)
{
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;

	if (!device_link_test(link, DL_FLAG_MANAGED) ||
	    link->status == DL_STATE_ACTIVE || sup->state_synced ||
	    !dev_has_sync_state(sup))
		return 0;

	if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
		dev_warn(sup, "sync_state() pending due to %s\n",
			 dev_name(link->consumer));
		return 0;
	}

	if (!list_empty(&sup->links.defer_sync))
		return 0;

	dev_warn(sup, "Timed out. Forcing sync_state()\n");
	sup->state_synced = true;
	get_device(sup);
	list_add_tail(&sup->links.defer_sync, data);

	return 0;
}

void fw_devlink_probing_done(void)
{
	LIST_HEAD(sync_list);

	device_links_write_lock();
	class_for_each_device(&devlink_class, NULL, &sync_list,
			      fw_devlink_dev_sync_state);
	device_links_write_unlock();
	device_links_flush_sync_list(&sync_list, NULL);
}

/**
 * wait_for_init_devices_probe - Try to probe any device needed for init
 *
 * Some devices might need to be probed and bound successfully before the kernel
 * boot sequence can finish and move on to init/userspace. For example, a
 * network interface might need to be bound to be able to mount an NFS rootfs.
 *
 * With fw_devlink=on by default, some of these devices might be blocked from
 * probing because they are waiting on an optional supplier that doesn't have a
 * driver. While fw_devlink will eventually identify such devices and unblock
 * the probing automatically, it might be too late by the time it unblocks the
 * probing of devices. For example, the IP4 autoconfig might time out before
 * fw_devlink unblocks probing of the network interface.
 *
 * This function is available to temporarily try and probe all devices that have
 * a driver even if some of their suppliers haven't been added or don't have
 * drivers.
 *
 * The drivers can then decide which of the suppliers are optional vs mandatory
 * and probe the device if possible. By the time this function returns, all such
 * "best effort" probes are guaranteed to be completed. If a device successfully
 * probes in this mode, we delete all fw_devlink discovered dependencies of that
 * device where the supplier hasn't yet probed successfully because they have to
 * be optional dependencies.
 *
 * Any devices that didn't successfully probe go back to being treated as if
 * this function was never called.
 *
 * This also means that some devices that aren't needed for init and could have
 * waited for their optional supplier to probe (when the supplier's module is
 * loaded later on) would end up probing prematurely with limited functionality.
 * So call this function only when boot would fail without it.
 */
void __init wait_for_init_devices_probe(void)
{
	if (!fw_devlink_flags || fw_devlink_is_permissive())
		return;

	/*
	 * Wait for all ongoing probes to finish so that the "best effort" is
	 * only applied to devices that can't probe otherwise.
	 */
	wait_for_device_probe();

	pr_info("Trying to probe devices needed for running init ...\n");
	fw_devlink_best_effort = true;
	driver_deferred_probe_trigger();

	/*
	 * Wait for all "best effort" probes to finish before going back to
	 * normal enforcement.
	 */
	wait_for_device_probe();
	fw_devlink_best_effort = false;
}

static void fw_devlink_unblock_consumers(struct device *dev)
{
	struct device_link *link;

	if (!fw_devlink_flags || fw_devlink_is_permissive())
		return;

	device_links_write_lock();
	list_for_each_entry(link, &dev->links.consumers, s_node)
		fw_devlink_relax_link(link);
	device_links_write_unlock();
}

static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
{
	struct device *dev;
	bool ret;

	if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED))
		return false;

	dev = get_dev_from_fwnode(fwnode);
	ret = !dev || dev->links.status == DL_DEV_NO_DRIVER;
	put_device(dev);

	return ret;
}

static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *parent;

	fwnode_for_each_parent_node(fwnode, parent) {
		if (fwnode_init_without_drv(parent)) {
			fwnode_handle_put(parent);
			return true;
		}
	}

	return false;
}

/**
 * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
 * @ancestor: Firmware node which is tested for being an ancestor
 * @child: Firmware node which is tested for being the child
 *
 * A node is considered an ancestor of itself too.
 *
 * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
 */
static bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor,
				  const struct fwnode_handle *child)
{
	struct fwnode_handle *parent;

	if (IS_ERR_OR_NULL(ancestor))
		return false;

	if (child == ancestor)
		return true;

	fwnode_for_each_parent_node(child, parent) {
		if (parent == ancestor) {
			fwnode_handle_put(parent);
			return true;
		}
	}
	return false;
}

/**
 * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
 * @fwnode: firmware node
 *
 * Given a firmware node (@fwnode), this function finds its closest ancestor
 * firmware node that has a corresponding struct device and returns that struct
 * device.
 *
 * The caller is responsible for calling put_device() on the returned device
 * pointer.
 *
 * Return: a pointer to the device of the @fwnode's closest ancestor.
 */
static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
{
	struct fwnode_handle *parent;
	struct device *dev;

	fwnode_for_each_parent_node(fwnode, parent) {
		dev = get_dev_from_fwnode(parent);
		if (dev) {
			fwnode_handle_put(parent);
			return dev;
		}
	}
	return NULL;
}

/**
 * __fw_devlink_relax_cycles - Relax and mark dependency cycles.
 * @con_handle: Potential consumer device fwnode.
 * @sup_handle: Potential supplier's fwnode.
 *
 * Needs to be called with fwnode_lock and device link lock held.
 *
 * Check if @sup_handle or any of its ancestors or suppliers directly or
 * indirectly depend on @con_handle. This function can detect multiple cycles
 * between @sup_handle and @con_handle. When such dependency cycles are found,
 * convert all device links created solely by fw_devlink into SYNC_STATE_ONLY
 * device links. Also, mark all fwnode links in the cycle with
 * FWLINK_FLAG_CYCLE so that when they are converted into a device link in the
 * future, they are created as SYNC_STATE_ONLY device links. This is the
 * equivalent of doing fw_devlink=permissive just between the devices in the
 * cycle. We need to do this because, at this point, fw_devlink can't tell
 * which of these dependencies is not a real dependency.
 *
 * Return true if one or more cycles were found. Otherwise, return false.
 */
static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
				      struct fwnode_handle *sup_handle)
{
	struct device *sup_dev = NULL, *par_dev = NULL, *con_dev = NULL;
	struct fwnode_link *link;
	struct device_link *dev_link;
	bool ret = false;

	if (!sup_handle)
		return false;

	/*
	 * We aren't trying to find all cycles. Just a cycle between con and
	 * sup_handle.
	 */
	if (sup_handle->flags & FWNODE_FLAG_VISITED)
		return false;

	sup_handle->flags |= FWNODE_FLAG_VISITED;

	/* Termination condition. */
	if (sup_handle == con_handle) {
		pr_debug("----- cycle: start -----\n");
		ret = true;
		goto out;
	}

	sup_dev = get_dev_from_fwnode(sup_handle);
	con_dev = get_dev_from_fwnode(con_handle);
	/*
	 * If sup_dev is bound to a driver and @con hasn't started binding to a
	 * driver, sup_dev can't be a consumer of @con. So, no need to check
	 * further.
	 */
	if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
	    con_dev && con_dev->links.status == DL_DEV_NO_DRIVER) {
		ret = false;
		goto out;
	}

	list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
		if (link->flags & FWLINK_FLAG_IGNORE)
			continue;

		if (__fw_devlink_relax_cycles(con_handle, link->supplier)) {
			__fwnode_link_cycle(link);
			ret = true;
		}
	}

	/*
	 * Give priority to device parent over fwnode parent to account for any
	 * quirks in how fwnodes are converted to devices.
	 */
	if (sup_dev)
		par_dev = get_device(sup_dev->parent);
	else
		par_dev = fwnode_get_next_parent_dev(sup_handle);

	if (par_dev && __fw_devlink_relax_cycles(con_handle, par_dev->fwnode)) {
		pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
			 par_dev->fwnode);
		ret = true;
	}

	if (!sup_dev)
		goto out;

	list_for_each_entry(dev_link, &sup_dev->links.suppliers, c_node) {
		/*
		 * Ignore a SYNC_STATE_ONLY flag only if it wasn't marked as
		 * such due to a cycle.
		 */
		if (device_link_flag_is_sync_state_only(dev_link->flags) &&
		    !device_link_test(dev_link, DL_FLAG_CYCLE))
			continue;

		if (__fw_devlink_relax_cycles(con_handle,
					      dev_link->supplier->fwnode)) {
			pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
				 dev_link->supplier->fwnode);
			fw_devlink_relax_link(dev_link);
			dev_link->flags |= DL_FLAG_CYCLE;
			ret = true;
		}
	}

out:
	sup_handle->flags &= ~FWNODE_FLAG_VISITED;
	put_device(sup_dev);
	put_device(con_dev);
	put_device(par_dev);
	return ret;
}

/**
 * fw_devlink_create_devlink - Create a device link from a consumer to fwnode
 * @con: consumer device for the device link
 * @sup_handle: fwnode handle of supplier
 * @link: fwnode link that's being converted to a device link
 *
 * This function will try to create a device link between the consumer device
 * @con and the supplier device represented by @sup_handle.
 *
 * The supplier has to be provided as a fwnode because incorrect cycles in
 * fwnode links can sometimes cause the supplier device to never be created.
 * This function detects such cases and returns an error if it cannot create a
 * device link from the consumer to a missing supplier.
 *
 * Return:
 * * 0 - on successfully creating a device link
 * * -EINVAL - if the device link cannot be created as expected
 * * -EAGAIN - if the device link cannot be created right now, but it may be
 *   possible to do that in the future
 */
static int fw_devlink_create_devlink(struct device *con,
				     struct fwnode_handle *sup_handle,
				     struct fwnode_link *link)
{
	struct device *sup_dev;
	int ret = 0;
	u32 flags;

	if (link->flags & FWLINK_FLAG_IGNORE)
		return 0;

	/*
	 * In some cases, a device P might also be a supplier to its child node
	 * C. However, this would defer the probe of C until the probe of P
	 * completes successfully. This is perfectly fine in the device driver
	 * model. device_add() doesn't guarantee probe completion of the device
	 * by the time it returns.
	 *
	 * However, there are a few drivers that assume C will finish probing
	 * as soon as it's added and before P finishes probing. So, we provide
	 * a flag to let fw_devlink know not to delay the probe of C until the
	 * probe of P completes successfully.
	 *
	 * When such a flag is set, we can't create device links where P is the
	 * supplier of C as that would delay the probe of C.
	 */
	if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
	    fwnode_is_ancestor_of(sup_handle, con->fwnode))
		return -EINVAL;

	/*
	 * Don't try to optimize by not calling the cycle detection logic under
	 * certain conditions. There's always some corner case that won't get
	 * detected.
	 */
	device_links_write_lock();
	if (__fw_devlink_relax_cycles(link->consumer, sup_handle)) {
		__fwnode_link_cycle(link);
		pr_debug("----- cycle: end -----\n");
		pr_info("%pfwf: Fixed dependency cycle(s) with %pfwf\n",
			link->consumer, sup_handle);
	}
	device_links_write_unlock();

	if (con->fwnode == link->consumer)
		flags = fw_devlink_get_flags(link->flags);
	else
		flags = FW_DEVLINK_FLAGS_PERMISSIVE;

	if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
		sup_dev = fwnode_get_next_parent_dev(sup_handle);
	else
		sup_dev = get_dev_from_fwnode(sup_handle);

	if (sup_dev) {
		/*
		 * If it's one of those drivers that don't actually bind to
		 * their device using driver core, then don't wait on this
		 * supplier device indefinitely.
		 */
		if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
		    sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
			dev_dbg(con,
				"Not linking %pfwf - dev might never probe\n",
				sup_handle);
			ret = -EINVAL;
			goto out;
		}

		if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
			dev_err(con, "Failed to create device link (0x%x) with supplier %s for %pfwf\n",
				flags, dev_name(sup_dev), link->consumer);
			ret = -EINVAL;
		}

		goto out;
	}

	/*
	 * Supplier or supplier's ancestor already initialized without a struct
	 * device or being probed by a driver.
	 */
	if (fwnode_init_without_drv(sup_handle) ||
	    fwnode_ancestor_init_without_drv(sup_handle)) {
		dev_dbg(con, "Not linking %pfwf - might never become dev\n",
			sup_handle);
		return -EINVAL;
	}

	ret = -EAGAIN;
out:
	put_device(sup_dev);
	return ret;
}

/**
 * __fw_devlink_link_to_consumers - Create device links to consumers of a device
 * @dev: Device that needs to be linked to its consumers
 *
 * This function looks at all the consumer fwnodes of @dev and creates device
 * links between the consumer device and @dev (supplier).
 *
 * If the consumer device has not been added yet, then this function creates a
 * SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device
 * of the consumer fwnode. This is necessary to make sure @dev doesn't get a
 * sync_state() callback before the real consumer device gets to be added and
 * then probed.
 *
 * Once device links are created from the real consumer to @dev (supplier), the
 * fwnode links are deleted.
 */
static void __fw_devlink_link_to_consumers(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;
	struct fwnode_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
		struct device *con_dev;
		bool own_link = true;
		int ret;

		con_dev = get_dev_from_fwnode(link->consumer);
		/*
		 * If consumer device is not available yet, make a "proxy"
		 * SYNC_STATE_ONLY link from the consumer's parent device to
		 * the supplier device. This is necessary to make sure the
		 * supplier doesn't get a sync_state() callback before the real
		 * consumer can create a device link to the supplier.
		 *
		 * This proxy link step is needed to handle the case where the
		 * consumer's parent device is added before the supplier.
		 */
		if (!con_dev) {
			con_dev = fwnode_get_next_parent_dev(link->consumer);
			/*
			 * However, if the consumer's parent device is also the
			 * parent of the supplier, don't create a
			 * consumer-supplier link from the parent to its child
			 * device. Such a dependency is impossible.
			 */
			if (con_dev &&
			    fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
				put_device(con_dev);
				con_dev = NULL;
			} else {
				own_link = false;
			}
		}

		if (!con_dev)
			continue;

		ret = fw_devlink_create_devlink(con_dev, fwnode, link);
		put_device(con_dev);
		if (!own_link || ret == -EAGAIN)
			continue;

		__fwnode_link_del(link);
	}
}

/**
 * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
 * @dev: The consumer device that needs to be linked to its suppliers
 * @fwnode: Root of the fwnode tree that is used to create device links
 *
 * This function looks at all the supplier fwnodes of fwnode tree rooted at
 * @fwnode and creates device links between @dev (consumer) and all the
 * supplier devices of the entire fwnode tree at @fwnode.
 *
 * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
 * and the real suppliers of @dev. Once these device links are created, the
 * fwnode links are deleted.
 *
 * In addition, it also looks at all the suppliers of the entire fwnode tree
 * because some of the child devices of @dev that have not been added yet
 * (because @dev hasn't probed) might already have their suppliers added to
 * driver core. So, this function creates SYNC_STATE_ONLY device links between
 * @dev (consumer) and these suppliers to make sure they don't execute their
 * sync_state() callbacks before these child devices have a chance to create
 * their device links. The fwnode links that correspond to the child devices
 * aren't deleted because they are needed later to create the device links
 * between the real consumer and supplier devices.
 */
static void __fw_devlink_link_to_suppliers(struct device *dev,
					   struct fwnode_handle *fwnode)
{
	bool own_link = (dev->fwnode == fwnode);
	struct fwnode_link *link, *tmp;
	struct fwnode_handle *child = NULL;

	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
		int ret;
		struct fwnode_handle *sup = link->supplier;

		ret = fw_devlink_create_devlink(dev, sup, link);
		if (!own_link || ret == -EAGAIN)
			continue;

		__fwnode_link_del(link);
	}

	/*
	 * Make "proxy" SYNC_STATE_ONLY device links to represent the needs of
	 * all the descendants. This proxy link step is needed to handle the
	 * case where the supplier is added before the consumer's parent device
	 * (@dev).
	 */
	while ((child = fwnode_get_next_available_child_node(fwnode, child)))
		__fw_devlink_link_to_suppliers(dev, child);
}

static void fw_devlink_link_device(struct device *dev)
{
	struct fwnode_handle *fwnode = dev->fwnode;

	if (!fw_devlink_flags)
		return;

	fw_devlink_parse_fwtree(fwnode);

	guard(mutex)(&fwnode_link_lock);

	__fw_devlink_link_to_consumers(dev);
	__fw_devlink_link_to_suppliers(dev, fwnode);
}

/* Device links support end. */

static struct kobject *dev_kobj;

/* /sys/dev/char */
static struct kobject *sysfs_dev_char_kobj;

/* /sys/dev/block */
static struct kobject *sysfs_dev_block_kobj;

static DEFINE_MUTEX(device_hotplug_lock);

void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}

void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}

int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}
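
/*
 * Usage sketch (editor's illustration): sysfs store() callbacks that need
 * the hotplug lock call the helper above and propagate a nonzero return
 * value, so the syscall is restarted instead of sleeping on the mutex:
 *
 *	ret = lock_device_hotplug_sysfs();
 *	if (ret)
 *		return ret;
 *	...
 *	unlock_device_hotplug();
 *
 * online_store() later in this file follows exactly this pattern.
 */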

#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
	return !(dev->type == &part_type);
}
#else
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif

static void device_platform_notify(struct device *dev)
{
	acpi_device_notify(dev);

	software_node_notify(dev);
}

static void device_platform_notify_remove(struct device *dev)
{
	software_node_notify_remove(dev);

	acpi_device_notify_remove(dev);
}

/**
 * dev_driver_string - Return a device's driver name, if at all possible
 * @dev: struct device to get the name of
 *
 * Will return the name of the device's driver if the device is bound to
 * a driver. If the device is not bound to a driver, it will return the
 * name of the bus it is attached to. If it is not attached to a bus
 * either, an empty string will be returned.
 */
const char *dev_driver_string(const struct device *dev)
{
	struct device_driver *drv;

	/*
	 * dev->driver can change to NULL underneath us because of unbinding,
	 * so be careful about accessing it. dev->bus and dev->class should
	 * never change once they are set, so they don't need special care.
	 */
	drv = READ_ONCE(dev->driver);
	return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);

#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->show)
		ret = dev_attr->show(dev, dev_attr, buf);
	if (ret >= (ssize_t)PAGE_SIZE) {
		printk("dev_attr_show: %pS returned bad count\n",
		       dev_attr->show);
	}
	return ret;
}

static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->store)
		ret = dev_attr->store(dev, dev_attr, buf, count);
	return ret;
}

static const struct sysfs_ops dev_sysfs_ops = {
	.show	= dev_attr_show,
	.store	= dev_attr_store,
};

#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)

ssize_t device_store_ulong(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	unsigned long new;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		return ret;
	*(unsigned long *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(device_store_ulong);

ssize_t device_show_ulong(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);
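
/*
 * Usage sketch (editor's illustration; "foo_timeout"/"timeout" are
 * made-up names): the device_show_ulong()/device_store_ulong() pair is
 * normally wired up through the DEVICE_ULONG_ATTR() helper, which wraps a
 * variable in a struct dev_ext_attribute:
 *
 *	static unsigned long foo_timeout;
 *	static DEVICE_ULONG_ATTR(timeout, 0644, foo_timeout);
 *
 *	// in probe():
 *	err = device_create_file(dev, &dev_attr_timeout.attr);
 *
 * DEVICE_INT_ATTR() and DEVICE_BOOL_ATTR() work the same way for the int
 * and bool helpers below.
 */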

ssize_t device_store_int(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	long new;

	ret = kstrtol(buf, 0, &new);
	if (ret)
		return ret;

	if (new > INT_MAX || new < INT_MIN)
		return -EINVAL;
	*(int *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(device_store_int);

ssize_t device_show_int(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);

ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	if (kstrtobool(buf, ea->var) < 0)
		return -EINVAL;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);

ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);

ssize_t device_show_string(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%s\n", (char *)ea->var);
}
EXPORT_SYMBOL_GPL(device_show_string);

/**
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * This is called once the reference count for the object
 * reaches 0. We forward the call to the device's release
 * method, which should handle actually freeing the structure.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without a driver attached
	 * and managed resources may have been acquired. Make sure
	 * all resources are released.
	 *
	 * Drivers can still add resources to the device after it has
	 * been deleted but is still alive, so release devres here to
	 * avoid a possible memory leak.
	 */
	devres_release_all(dev);

	kfree(dev->dma_range_map);

	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
			dev_name(dev));
	kfree(p);
}

static const void *device_namespace(const struct kobject *kobj)
{
	const struct device *dev = kobj_to_dev(kobj);
	const void *ns = NULL;

	if (dev->class && dev->class->namespace)
		ns = dev->class->namespace(dev);

	return ns;
}

static void device_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	const struct device *dev = kobj_to_dev(kobj);

	if (dev->class && dev->class->get_ownership)
		dev->class->get_ownership(dev, uid, gid);
}

static const struct kobj_type device_ktype = {
	.release	= device_release,
	.sysfs_ops	= &dev_sysfs_ops,
	.namespace	= device_namespace,
	.get_ownership	= device_get_ownership,
};

static int dev_uevent_filter(const struct kobject *kobj)
{
	const struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &device_ktype) {
		const struct device *dev = kobj_to_dev(kobj);

		if (dev->bus)
			return 1;
		if (dev->class)
			return 1;
	}
	return 0;
}

static const char *dev_uevent_name(const struct kobject *kobj)
{
	const struct device *dev = kobj_to_dev(kobj);

	if (dev->bus)
		return dev->bus->name;
	if (dev->class)
		return dev->class->name;
	return NULL;
}

/*
 * Try filling "DRIVER=<name>" uevent variable for a device. Because this
 * function may race with binding and unbinding the device from a driver,
 * we need to be careful. Binding is generally safe, at worst we miss the
 * fact that the device is already bound to a driver (but the driver
 * information that is delivered through uevents is best-effort, it may
 * become obsolete as soon as it is generated anyway). Unbinding is more
 * risky as the driver pointer is transitioning to NULL, so READ_ONCE()
 * should be used to make sure we are dealing with the same pointer, and
 * to ensure that the driver structure is not going to disappear from
 * under us we take the bus's drivers klist lock. This relies on the
 * assumptions that only a registered driver can be bound to a device,
 * and that the bus code takes the same lock to unregister a driver.
 */
static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	struct subsys_private *sp = bus_to_subsys(dev->bus);

	if (sp) {
		scoped_guard(spinlock, &sp->klist_drivers.k_lock) {
			struct device_driver *drv = READ_ONCE(dev->driver);

			if (drv)
				add_uevent_var(env, "DRIVER=%s", drv->name);
		}

		subsys_put(sp);
	}
}

static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
	const struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* add device node properties if present */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	/* Add "DRIVER=%s" variable if the device is bound to a driver */
	dev_driver_uevent(dev, env);

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

	/* have the bus specific function add its stuff */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	return retval;
}

static const struct kset_uevent_ops device_uevent_ops = {
	.filter		= dev_uevent_filter,
	.name		= dev_uevent_name,
	.uevent		= dev_uevent,
};

static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* search the kset the device belongs to */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect filter */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(&dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(&dev->kobj, env);
	if (retval)
		goto out;

	/* copy keys to file */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	kfree(env);
	return len;
}

static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&dev->kobj, buf, count);

	if (rc) {
		dev_err(dev, "uevent: failed to send synthetic uevent: %d\n", rc);
		return rc;
	}

	return count;
}
static DEVICE_ATTR_RW(uevent);
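
/*
 * Illustrative note (editor's addition): reading this attribute dumps the
 * uevent environment that would be generated for the device, and writing
 * an action name requests a synthetic uevent, e.g. from a shell:
 *
 *	cat /sys/devices/.../uevent
 *	echo add > /sys/devices/.../uevent
 */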

static ssize_t online_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	bool val;

	device_lock(dev);
	val = !dev->offline;
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}

static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);

static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	const char *loc;

	switch (dev->removable) {
	case DEVICE_REMOVABLE:
		loc = "removable";
		break;
	case DEVICE_FIXED:
		loc = "fixed";
		break;
	default:
		loc = "unknown";
	}
	return sysfs_emit(buf, "%s\n", loc);
}
static DEVICE_ATTR_RO(removable);

int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);
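
/*
 * Usage sketch (editor's illustration; the "foo" names are made up):
 * callers typically build the NULL-terminated array of groups with
 * ATTRIBUTE_GROUPS():
 *
 *	static struct attribute *foo_attrs[] = {
 *		&dev_attr_foo.attr,
 *		NULL
 *	};
 *	ATTRIBUTE_GROUPS(foo);
 *
 *	err = device_add_groups(dev, foo_groups);
 *
 * Most drivers should instead set struct device's ->groups pointer before
 * device_add(), so the core creates and removes the files for them.
 */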

void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);

union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};

static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}

/**
 * devm_device_add_group - given a device, create a managed attribute group
 * @dev: The device to create the group for
 * @grp: The attribute group to create
 *
 * This function creates a group for the first time. It will explicitly
 * warn and error if any of the attribute files being created already exist.
 *
 * Return: 0 on success or error code on failure.
 */
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_group_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_group(&dev->kobj, grp);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->group = grp;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
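
/*
 * Usage sketch (editor's illustration; "foo_group" is a made-up name):
 * because the group is devres-managed, a probe() path needs no matching
 * removal call:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		return devm_device_add_group(dev, &foo_group);
 *	}
 *
 * The group is removed automatically when the device is unbound.
 */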

static int device_add_attrs(struct device *dev)
{
	const struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
		if (error)
			goto err_remove_dev_online;
	}

	if (dev_removable_is_valid(dev)) {
		error = device_create_file(dev, &dev_attr_removable);
		if (error)
			goto err_remove_dev_waiting_for_supplier;
	}

	if (dev_add_physical_location(dev)) {
		error = device_add_group(dev,
			&dev_attr_physical_location_group);
		if (error)
			goto err_remove_dev_removable;
	}

	return 0;

err_remove_dev_removable:
	device_remove_file(dev, &dev_attr_removable);
err_remove_dev_waiting_for_supplier:
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
err_remove_dev_online:
	device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}

static void device_remove_attrs(struct device *dev)
{
	const struct class *class = dev->class;
	const struct device_type *type = dev->type;

	if (dev->physical_location) {
		device_remove_group(dev, &dev_attr_physical_location_group);
		kfree(dev->physical_location);
	}

	device_remove_file(dev, &dev_attr_removable);
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}

static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);

/* /sys/devices/ */
struct kset *devices_kset;

/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * device_create_file - create sysfs attribute file for device.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
int device_create_file(struct device *dev,
		       const struct device_attribute *attr)
{
	int error = 0;

	if (dev) {
		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
		     "Attribute %s: write permission without 'store'\n",
		     attr->attr.name);
		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
		     "Attribute %s: read permission without 'show'\n",
		     attr->attr.name);
		error = sysfs_create_file(&dev->kobj, &attr->attr);
	}

	return error;
}
EXPORT_SYMBOL_GPL(device_create_file);
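
/*
 * Usage sketch (editor's illustration; "foo" is a made-up attribute):
 * pairing a DEVICE_ATTR_RO() definition with device_create_file():
 *
 *	static ssize_t foo_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return sysfs_emit(buf, "%d\n", 42);
 *	}
 *	static DEVICE_ATTR_RO(foo);
 *
 *	err = device_create_file(dev, &dev_attr_foo);
 *
 * Note the WARN()s above: a mode granting read or write access must come
 * with a matching show() or store() callback.
 */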

/**
 * device_remove_file - remove sysfs attribute file.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
void device_remove_file(struct device *dev,
			const struct device_attribute *attr)
{
	if (dev)
		sysfs_remove_file(&dev->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(device_remove_file);

/**
 * device_remove_file_self - remove sysfs attribute file from its own method.
 * @dev: device.
 * @attr: device attribute descriptor.
 *
 * See kernfs_remove_self() for details.
 */
bool device_remove_file_self(struct device *dev,
			     const struct device_attribute *attr)
{
	if (dev)
		return sysfs_remove_file_self(&dev->kobj, &attr->attr);
	else
		return false;
}
EXPORT_SYMBOL_GPL(device_remove_file_self);

/**
 * device_create_bin_file - create sysfs binary attribute file for device.
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
int device_create_bin_file(struct device *dev,
			   const struct bin_attribute *attr)
{
	int error = -EINVAL;

	if (dev)
		error = sysfs_create_bin_file(&dev->kobj, attr);
	return error;
}
EXPORT_SYMBOL_GPL(device_create_bin_file);

/**
 * device_remove_bin_file - remove sysfs binary attribute file
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr)
{
	if (dev)
		sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);

static void klist_children_get(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	get_device(dev);
}

static void klist_children_put(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	put_device(dev);
}

/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing
 * its fields.
 * It is the first half of device_register(), if called by
 * that function, though it can also be called separately, so one
 * may use @dev's fields. In particular, get_device()/put_device()
 * may be used for reference counting of @dev after calling this
 * function.
 *
 * All fields in @dev must be initialized by the caller to 0, except
 * for those explicitly set to some other value. The simplest
 * approach is to use kzalloc() to allocate the structure containing
 * @dev.
 *
 * NOTE: Use put_device() to give up your reference instead of freeing
 * @dev directly once you have called this function.
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, NUMA_NO_NODE);
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	INIT_LIST_HEAD(&dev->links.defer_sync);
	dev->links.status = DL_DEV_NO_DRIVER;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	dev->dma_coherent = dma_default_coherent;
#endif
	swiotlb_dev_init(dev);
}
EXPORT_SYMBOL_GPL(device_initialize);
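
/*
 * Usage sketch (editor's illustration; the "foo" structure and helpers
 * are made up): the split device_initialize()/device_add() pattern, with
 * the mandatory put_device() on failure:
 *
 *	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *	device_initialize(&foo->dev);
 *	foo->dev.release = foo_release;
 *	err = dev_set_name(&foo->dev, "foo%d", id);
 *	if (!err)
 *		err = device_add(&foo->dev);
 *	if (err)
 *		put_device(&foo->dev);	// never kfree() after initialize
 */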

struct kobject *virtual_device_parent(void)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}

struct class_dir {
	struct kobject kobj;
	const struct class *class;
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)

static void class_dir_release(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);

	kfree(dir);
}

static const
struct kobj_ns_type_operations *class_dir_child_ns_type(const struct kobject *kobj)
{
	const struct class_dir *dir = to_class_dir(kobj);

	return dir->class->ns_type;
}

static const struct kobj_type class_dir_ktype = {
	.release	= class_dir_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.child_ns_type	= class_dir_child_ns_type
};

static struct kobject *class_dir_create_and_add(struct subsys_private *sp,
						struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = sp->class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &sp->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", sp->class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}

static DEFINE_MUTEX(gdp_mutex);

static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	struct subsys_private *sp = class_to_subsys(dev->class);
	struct kobject *kobj = NULL;

	if (sp) {
		struct kobject *parent_kobj;
		struct kobject *k;

		/*
		 * If we have no parent, we live in "virtual".
		 * Class-devices with a non class-device as parent, live
		 * in a "glue" directory to prevent namespace collisions.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent();
		else if (parent->class && !dev->class->ns_type) {
			subsys_put(sp);
			return &parent->kobj;
		} else {
			parent_kobj = &parent->kobj;
		}

		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&sp->glue_dirs.list_lock);
		list_for_each_entry(k, &sp->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&sp->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			subsys_put(sp);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(sp, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		subsys_put(sp);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus) {
		struct device *dev_root = bus_get_dev_root(dev->bus);

		if (dev_root) {
			kobj = &dev_root->kobj;
			put_device(dev_root);
			return kobj;
		}
	}

	if (parent)
		return &parent->kobj;
	return NULL;
}

static inline bool live_in_glue_dir(struct kobject *kobj,
				    struct device *dev)
{
	struct subsys_private *sp;
	bool retval;

	if (!kobj || !dev->class)
		return false;

	sp = class_to_subsys(dev->class);
	if (!sp)
		return false;

	if (kobj->kset == &sp->glue_dirs)
		retval = true;
	else
		retval = false;

	subsys_put(sp);
	return retval;
}

static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}

/**
 * kobject_has_children - Returns whether a kobject has children.
 * @kobj: the object to test
 *
 * This will return whether a kobject has other kobjects as children.
 *
 * It does NOT account for the presence of attribute files, only sub
 * directories. It also assumes there is no concurrent addition or
 * removal of such children, and thus relies on external locking.
 */
static inline bool kobject_has_children(struct kobject *kobj)
{
	WARN_ON_ONCE(kref_read(&kobj->kref) == 0);

	return kobj->sd && kobj->sd->dir.subdirs;
}

/*
 * Make sure cleaning up the glue dir is done as the last step: the
 * kobject's ->release() handler needs to run while the global lock
 * is held.
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	unsigned int ref;

	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/*
	 * There is a race condition between removing glue directory
	 * and adding a new device under the glue directory.
	 *
	 * CPU1:                                         CPU2:
	 *
	 * device_add()
	 *   get_device_parent()
	 *     class_dir_create_and_add()
	 *       kobject_add_internal()
	 *         create_dir()    // create glue_dir
	 *
	 *                                               device_add()
	 *                                                 get_device_parent()
	 *                                                   kobject_get() // get glue_dir
	 *
	 * device_del()
	 *   cleanup_glue_dir()
	 *     kobject_del(glue_dir)
	 *
	 *                                               kobject_add()
	 *                                                 kobject_add_internal()
	 *                                                   create_dir() // in glue_dir
	 *                                                     sysfs_create_dir_ns()
	 *                                                       kernfs_create_dir_ns(sd)
	 *
	 *       sysfs_remove_dir() // glue_dir->sd=NULL
	 *       sysfs_put()        // free glue_dir->sd
	 *
	 *                                                       // sd is freed
	 *                                                       kernfs_new_node(sd)
	 *                                                         kernfs_get(glue_dir)
	 *                                                         kernfs_add_one()
	 *                                                         kernfs_put()
	 *
	 * Before CPU1 removes the last child device under the glue dir,
	 * if CPU2 adds a new device under the glue dir, the glue_dir
	 * kobject reference count will be increased to 2 in kobject_get(k),
	 * and CPU2 will have already called kernfs_create_dir_ns().
	 * Meanwhile, CPU1 calls sysfs_remove_dir() and sysfs_put(), which
	 * results in glue_dir->sd being freed.
	 *
	 * Then CPU2 will see a stale "empty" but still potentially used
	 * glue dir around in kernfs_new_node().
	 *
	 * In order to avoid this happening, we should also make sure that
	 * the kernfs_node for glue_dir is released in CPU1 only when the
	 * refcount for the glue_dir kobj is 1.
	 */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}

static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	struct subsys_private *sp;
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n", error);
		/* An error here doesn't warrant bringing down the device */
	}

	sp = class_to_subsys(dev->class);
	if (!sp)
		return 0;

	error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;
	goto exit;

out_device:
	sysfs_remove_link(&dev->kobj, "device");
out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
exit:
	subsys_put(sp);
	return error;
}

static void device_remove_class_symlinks(struct device *dev)
{
	struct subsys_private *sp = class_to_subsys(dev->class);

	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!sp)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
	sysfs_delete_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev));
	subsys_put(sp);
}

/**
 * dev_set_name - set a device name
 * @dev: device
 * @fmt: format string for the device's name
 */
int dev_set_name(struct device *dev, const char *fmt, ...)
{
	va_list vargs;
	int err;

	va_start(vargs, fmt);
	err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
	va_end(vargs);
	return err;
}
EXPORT_SYMBOL_GPL(dev_set_name);
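
/*
 * Example (editor's illustration): names are printf-formatted, so callers
 * commonly combine a prefix with an instance id, e.g.:
 *
 *	dev_set_name(&pdev->dev, "%s.%d", name, id);
 *
 * device_add() below falls back to "<bus->dev_name><dev->id>" when no
 * name has been set.
 */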

/* select a /sys/dev/ directory for the device */
static struct kobject *device_to_dev_kobj(struct device *dev)
{
	if (is_blockdev(dev))
		return sysfs_dev_block_kobj;
	else
		return sysfs_dev_char_kobj;
}

static int device_create_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	int error = 0;
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		error = sysfs_create_link(kobj, &dev->kobj, devt_str);
	}

	return error;
}

static void device_remove_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		sysfs_remove_link(kobj, devt_str);
	}
}

static int device_private_init(struct device *dev)
{
	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
	if (!dev->p)
		return -ENOMEM;
	dev->p->device = dev;
	klist_init(&dev->p->klist_children, klist_children_get,
		   klist_children_put);
	INIT_LIST_HEAD(&dev->p->deferred_probe);
	return 0;
}
3543
3544
/**
3545
* device_add - add device to device hierarchy.
3546
* @dev: device.
3547
*
3548
* This is part 2 of device_register(), though may be called
3549
* separately _iff_ device_initialize() has been called separately.
3550
*
3551
* This adds @dev to the kobject hierarchy via kobject_add(), adds it
3552
* to the global and sibling lists for the device, then
3553
* adds it to the other relevant subsystems of the driver model.
3554
*
3555
* Do not call this routine or device_register() more than once for
3556
* any device structure. The driver model core is not designed to work
3557
* with devices that get unregistered and then spring back to life.
3558
* (Among other things, it's very hard to guarantee that all references
3559
* to the previous incarnation of @dev have been dropped.) Allocate
3560
* and register a fresh new struct device instead.
3561
*
3562
* NOTE: _Never_ directly free @dev after calling this function, even
3563
* if it returned an error! Always use put_device() to give up your
3564
* reference instead.
3565
*
3566
* Rule of thumb is: if device_add() succeeds, you should call
3567
* device_del() when you want to get rid of it. If device_add() has
3568
* *not* succeeded, use *only* put_device() to drop the reference
3569
* count.
3570
*/
3571
int device_add(struct device *dev)
3572
{
3573
struct subsys_private *sp;
3574
struct device *parent;
3575
struct kobject *kobj;
3576
struct class_interface *class_intf;
3577
int error = -EINVAL;
3578
struct kobject *glue_dir = NULL;
3579
3580
dev = get_device(dev);
3581
if (!dev)
3582
goto done;
3583
3584
if (!dev->p) {
3585
error = device_private_init(dev);
3586
if (error)
3587
goto done;
3588
}
3589
3590
/*
3591
* for statically allocated devices, which should all be converted
3592
* some day, we need to initialize the name. We prevent reading back
3593
* the name, and force the use of dev_name()
3594
*/
3595
if (dev->init_name) {
3596
error = dev_set_name(dev, "%s", dev->init_name);
3597
dev->init_name = NULL;
3598
}
3599
3600
if (dev_name(dev))
3601
error = 0;
3602
/* subsystems can specify simple device enumeration */
3603
else if (dev->bus && dev->bus->dev_name)
3604
error = dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
3605
else
3606
error = -EINVAL;
3607
if (error)
3608
goto name_error;
3609
3610
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
3611
3612
parent = get_device(dev->parent);
3613
kobj = get_device_parent(dev, parent);
3614
if (IS_ERR(kobj)) {
3615
error = PTR_ERR(kobj);
3616
goto parent_error;
3617
}
3618
if (kobj)
3619
dev->kobj.parent = kobj;
3620
3621
/* use parent numa_node */
3622
if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
3623
set_dev_node(dev, dev_to_node(parent));
3624
3625
/* first, register with generic layer. */
3626
/* we require the name to be set before, and pass NULL */
3627
error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
3628
if (error) {
3629
glue_dir = kobj;
3630
goto Error;
3631
}
3632
3633
/* notify platform of device entry */
3634
device_platform_notify(dev);
3635
3636
error = device_create_file(dev, &dev_attr_uevent);
3637
if (error)
3638
goto attrError;
3639
3640
error = device_add_class_symlinks(dev);
3641
if (error)
3642
goto SymlinkError;
3643
error = device_add_attrs(dev);
3644
if (error)
3645
goto AttrsError;
3646
error = bus_add_device(dev);
3647
if (error)
3648
goto BusError;
3649
error = dpm_sysfs_add(dev);
3650
if (error)
3651
goto DPMError;
3652
device_pm_add(dev);
3653
3654
if (MAJOR(dev->devt)) {
3655
error = device_create_file(dev, &dev_attr_dev);
3656
if (error)
3657
goto DevAttrError;
3658
3659
error = device_create_sys_dev_entry(dev);
3660
if (error)
3661
goto SysEntryError;
3662
3663
devtmpfs_create_node(dev);
3664
}
3665
3666
/* Notify clients of device addition. This call must come
3667
* after dpm_sysfs_add() and before kobject_uevent().
3668
*/
3669
bus_notify(dev, BUS_NOTIFY_ADD_DEVICE);
3670
kobject_uevent(&dev->kobj, KOBJ_ADD);
3671
3672
/*
3673
* Check if any of the other devices (consumers) have been waiting for
3674
* this device (supplier) to be added so that they can create a device
3675
* link to it.
3676
*
3677
* This needs to happen after device_pm_add() because device_link_add()
3678
* requires the supplier be registered before it's called.
3679
*
3680
* But this also needs to happen before bus_probe_device() to make sure
3681
* waiting consumers can link to it before the driver is bound to the
3682
* device and the driver sync_state callback is called for this device.
3683
*/
3684
if (dev->fwnode && !dev->fwnode->dev) {
3685
dev->fwnode->dev = dev;
3686
fw_devlink_link_device(dev);
3687
}
3688
3689
bus_probe_device(dev);
3690
3691
/*
3692
* If all driver registration is done and a newly added device doesn't
3693
* match with any driver, don't block its consumers from probing in
3694
* case the consumer device is able to operate without this supplier.
3695
*/
3696
if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
3697
fw_devlink_unblock_consumers(dev);
3698
3699
if (parent)
3700
klist_add_tail(&dev->p->knode_parent,
3701
&parent->p->klist_children);
3702
3703
sp = class_to_subsys(dev->class);
3704
if (sp) {
3705
mutex_lock(&sp->mutex);
3706
/* tie the class to the device */
3707
klist_add_tail(&dev->p->knode_class, &sp->klist_devices);
3708
3709
/* notify any interfaces that the device is here */
3710
list_for_each_entry(class_intf, &sp->interfaces, node)
3711
if (class_intf->add_dev)
3712
class_intf->add_dev(dev);
3713
mutex_unlock(&sp->mutex);
3714
subsys_put(sp);
3715
}
3716
done:
3717
put_device(dev);
3718
return error;
3719
SysEntryError:
3720
if (MAJOR(dev->devt))
3721
device_remove_file(dev, &dev_attr_dev);
3722
DevAttrError:
3723
device_pm_remove(dev);
3724
dpm_sysfs_remove(dev);
3725
DPMError:
3726
device_set_driver(dev, NULL);
3727
bus_remove_device(dev);
3728
BusError:
3729
device_remove_attrs(dev);
3730
AttrsError:
3731
device_remove_class_symlinks(dev);
3732
SymlinkError:
3733
device_remove_file(dev, &dev_attr_uevent);
3734
attrError:
3735
device_platform_notify_remove(dev);
3736
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
3737
glue_dir = get_glue_dir(dev);
3738
kobject_del(&dev->kobj);
3739
Error:
3740
cleanup_glue_dir(dev, glue_dir);
3741
parent_error:
3742
put_device(parent);
3743
name_error:
3744
kfree(dev->p);
3745
dev->p = NULL;
3746
goto done;
3747
}
3748
EXPORT_SYMBOL_GPL(device_add);
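/*
 * Illustrative sketch (editorial, not part of the driver core): the
 * device_initialize()/device_add() pattern the comment above describes,
 * including the put_device()-only error path. All example_* names are
 * hypothetical.
 */
#if 0
struct example_child {
	struct device dev;
};

/* The release callback is the only place the memory may be freed. */
static void example_child_release(struct device *dev)
{
	kfree(container_of(dev, struct example_child, dev));
}

static int example_add_child(struct device *parent)
{
	struct example_child *child;
	int err;

	child = kzalloc(sizeof(*child), GFP_KERNEL);
	if (!child)
		return -ENOMEM;

	device_initialize(&child->dev);
	child->dev.parent = parent;
	child->dev.release = example_child_release;

	err = dev_set_name(&child->dev, "example-child");
	if (err)
		goto out_put;

	err = device_add(&child->dev);
	if (err)
		goto out_put;	/* never kfree() once device_add() was called */

	return 0;

out_put:
	put_device(&child->dev);	/* ends up in example_child_release() */
	return err;
}
#endif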
3749
3750
/**
3751
* device_register - register a device with the system.
3752
* @dev: pointer to the device structure
3753
*
3754
* This happens in two clean steps - initialize the device
3755
* and add it to the system. The two steps can be called
3756
* separately, but this is the easiest and most common.
3757
* I.e. you should only call the two helpers separately if
3758
* you have a clearly defined need to use and refcount the device
3759
* before it is added to the hierarchy.
3760
*
3761
* For more information, see the kerneldoc for device_initialize()
3762
* and device_add().
3763
*
3764
* NOTE: _Never_ directly free @dev after calling this function, even
3765
* if it returned an error! Always use put_device() to give up the
3766
* reference initialized in this function instead.
3767
*/
3768
int device_register(struct device *dev)
3769
{
3770
device_initialize(dev);
3771
return device_add(dev);
3772
}
3773
EXPORT_SYMBOL_GPL(device_register);
3774
3775
/**
3776
* get_device - increment reference count for device.
3777
* @dev: device.
3778
*
3779
* This simply forwards the call to kobject_get(), though
3780
* we do take care to provide for the case that we get a NULL
3781
* pointer passed in.
3782
*/
3783
struct device *get_device(struct device *dev)
3784
{
3785
return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
3786
}
3787
EXPORT_SYMBOL_GPL(get_device);
3788
3789
/**
3790
* put_device - decrement reference count.
3791
* @dev: device in question.
3792
*/
3793
void put_device(struct device *dev)
3794
{
3795
/* might_sleep(); */
3796
if (dev)
3797
kobject_put(&dev->kobj);
3798
}
3799
EXPORT_SYMBOL_GPL(put_device);
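/*
 * Illustrative sketch (editorial): every successful get_device() must be
 * balanced by exactly one put_device(); both helpers above tolerate NULL.
 */
#if 0
static void example_borrow(struct device *dev)
{
	struct device *ref = get_device(dev);	/* NULL if dev is NULL */

	if (!ref)
		return;

	dev_info(ref, "holding a temporary reference\n");
	put_device(ref);	/* drop the reference when done */
}
#endif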
3800
3801
bool kill_device(struct device *dev)
3802
{
3803
/*
3804
* Require the device lock and set the "dead" flag to guarantee that
3805
* the update behavior is consistent with the other bitfields near
3806
* it and that we cannot have an asynchronous probe routine trying
3807
* to run while we are tearing out the bus/class/sysfs from
3808
* underneath the device.
3809
*/
3810
device_lock_assert(dev);
3811
3812
if (dev->p->dead)
3813
return false;
3814
dev->p->dead = true;
3815
return true;
3816
}
3817
EXPORT_SYMBOL_GPL(kill_device);
3818
3819
/**
3820
* device_del - delete device from system.
3821
* @dev: device.
3822
*
3823
* This is the first part of the device unregistration
3824
* sequence. This removes the device from the lists we control
3825
* from here, has it removed from the other driver model
3826
* subsystems it was added to in device_add(), and removes it
3827
* from the kobject hierarchy.
3828
*
3829
* NOTE: this should be called manually _iff_ device_add() was
3830
* also called manually.
3831
*/
3832
void device_del(struct device *dev)
3833
{
3834
struct subsys_private *sp;
3835
struct device *parent = dev->parent;
3836
struct kobject *glue_dir = NULL;
3837
struct class_interface *class_intf;
3838
unsigned int noio_flag;
3839
3840
device_lock(dev);
3841
kill_device(dev);
3842
device_unlock(dev);
3843
3844
if (dev->fwnode && dev->fwnode->dev == dev)
3845
dev->fwnode->dev = NULL;
3846
3847
/* Notify clients of device removal. This call must come
3848
* before dpm_sysfs_remove().
3849
*/
3850
noio_flag = memalloc_noio_save();
3851
bus_notify(dev, BUS_NOTIFY_DEL_DEVICE);
3852
3853
dpm_sysfs_remove(dev);
3854
if (parent)
3855
klist_del(&dev->p->knode_parent);
3856
if (MAJOR(dev->devt)) {
3857
devtmpfs_delete_node(dev);
3858
device_remove_sys_dev_entry(dev);
3859
device_remove_file(dev, &dev_attr_dev);
3860
}
3861
3862
sp = class_to_subsys(dev->class);
3863
if (sp) {
3864
device_remove_class_symlinks(dev);
3865
3866
mutex_lock(&sp->mutex);
3867
/* notify any interfaces that the device is now gone */
3868
list_for_each_entry(class_intf, &sp->interfaces, node)
3869
if (class_intf->remove_dev)
3870
class_intf->remove_dev(dev);
3871
/* remove the device from the class list */
3872
klist_del(&dev->p->knode_class);
3873
mutex_unlock(&sp->mutex);
3874
subsys_put(sp);
3875
}
3876
device_remove_file(dev, &dev_attr_uevent);
3877
device_remove_attrs(dev);
3878
bus_remove_device(dev);
3879
device_pm_remove(dev);
3880
driver_deferred_probe_del(dev);
3881
device_platform_notify_remove(dev);
3882
device_links_purge(dev);
3883
3884
/*
3885
* If a device does not have a driver attached, we need to clean
3886
* up any managed resources. We do this in device_release(), but
3887
* it's never called (and we leak the device) if a managed
3888
* resource holds a reference to the device. So release all
3889
* managed resources here, like we do in driver_detach(). We
3890
* still need to do so again in device_release() in case someone
3891
* adds a new resource after this point, though.
3892
*/
3893
devres_release_all(dev);
3894
3895
bus_notify(dev, BUS_NOTIFY_REMOVED_DEVICE);
3896
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
3897
glue_dir = get_glue_dir(dev);
3898
kobject_del(&dev->kobj);
3899
cleanup_glue_dir(dev, glue_dir);
3900
memalloc_noio_restore(noio_flag);
3901
put_device(parent);
3902
}
3903
EXPORT_SYMBOL_GPL(device_del);
3904
3905
/**
3906
* device_unregister - unregister device from system.
3907
* @dev: device going away.
3908
*
3909
* We do this in two parts, like we do device_register(). First,
3910
* we remove it from all the subsystems with device_del(), then
3911
* we decrement the reference count via put_device(). If that
3912
* is the final reference count, the device will be cleaned up
3913
* via device_release() above. Otherwise, the structure will
3914
* stick around until the final reference to the device is dropped.
3915
*/
3916
void device_unregister(struct device *dev)
3917
{
3918
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
3919
device_del(dev);
3920
put_device(dev);
3921
}
3922
EXPORT_SYMBOL_GPL(device_unregister);
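/*
 * Illustrative sketch (editorial): tearing down the hypothetical child from
 * the device_add() sketch above. device_unregister() is device_del() plus
 * put_device(); the memory is freed only once the last reference is gone
 * and the release() callback runs.
 */
#if 0
static void example_remove_child(struct example_child *child)
{
	device_unregister(&child->dev);	/* never kfree() directly */
}
#endif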
3923
3924
static struct device *prev_device(struct klist_iter *i)
3925
{
3926
struct klist_node *n = klist_prev(i);
3927
struct device *dev = NULL;
3928
struct device_private *p;
3929
3930
if (n) {
3931
p = to_device_private_parent(n);
3932
dev = p->device;
3933
}
3934
return dev;
3935
}
3936
3937
static struct device *next_device(struct klist_iter *i)
3938
{
3939
struct klist_node *n = klist_next(i);
3940
struct device *dev = NULL;
3941
struct device_private *p;
3942
3943
if (n) {
3944
p = to_device_private_parent(n);
3945
dev = p->device;
3946
}
3947
return dev;
3948
}
3949
3950
/**
3951
* device_get_devnode - path of device node file
3952
* @dev: device
3953
* @mode: returned file access mode
3954
* @uid: returned file owner
3955
* @gid: returned file group
3956
* @tmp: possibly allocated string
3957
*
3958
* Return the relative path of a possible device node.
3959
* Non-default names may need to allocate a memory to compose
3960
* a name. This memory is returned in tmp and needs to be
3961
* freed by the caller.
3962
*/
3963
const char *device_get_devnode(const struct device *dev,
3964
umode_t *mode, kuid_t *uid, kgid_t *gid,
3965
const char **tmp)
3966
{
3967
char *s;
3968
3969
*tmp = NULL;
3970
3971
/* the device type may provide a specific name */
3972
if (dev->type && dev->type->devnode)
3973
*tmp = dev->type->devnode(dev, mode, uid, gid);
3974
if (*tmp)
3975
return *tmp;
3976
3977
/* the class may provide a specific name */
3978
if (dev->class && dev->class->devnode)
3979
*tmp = dev->class->devnode(dev, mode);
3980
if (*tmp)
3981
return *tmp;
3982
3983
/* return name without allocation, tmp == NULL */
3984
if (strchr(dev_name(dev), '!') == NULL)
3985
return dev_name(dev);
3986
3987
/* replace '!' in the name with '/' */
3988
s = kstrdup_and_replace(dev_name(dev), '!', '/', GFP_KERNEL);
3989
if (!s)
3990
return NULL;
3991
return *tmp = s;
3992
}
3993
3994
/**
3995
* device_for_each_child - device child iterator.
3996
* @parent: parent struct device.
3997
* @fn: function to be called for each device.
3998
* @data: data for the callback.
3999
*
4000
* Iterate over @parent's child devices, and call @fn for each,
4001
* passing it @data.
4002
*
4003
* We check the return of @fn each time. If it returns anything
4004
* other than 0, we break out and return that value.
4005
*/
4006
int device_for_each_child(struct device *parent, void *data,
4007
device_iter_t fn)
4008
{
4009
struct klist_iter i;
4010
struct device *child;
4011
int error = 0;
4012
4013
if (!parent || !parent->p)
4014
return 0;
4015
4016
klist_iter_init(&parent->p->klist_children, &i);
4017
while (!error && (child = next_device(&i)))
4018
error = fn(child, data);
4019
klist_iter_exit(&i);
4020
return error;
4021
}
4022
EXPORT_SYMBOL_GPL(device_for_each_child);
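/*
 * Illustrative sketch (editorial): a minimal device_iter_t callback that
 * counts children; returning non-zero from the callback would stop the
 * iteration, as documented above. Names are hypothetical.
 */
#if 0
static int example_count_one(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}

static unsigned int example_count_children(struct device *parent)
{
	unsigned int count = 0;

	device_for_each_child(parent, &count, example_count_one);
	return count;
}
#endif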
4023
4024
/**
4025
* device_for_each_child_reverse - device child iterator in reversed order.
4026
* @parent: parent struct device.
4027
* @fn: function to be called for each device.
4028
* @data: data for the callback.
4029
*
4030
* Iterate over @parent's child devices, and call @fn for each,
4031
* passing it @data.
4032
*
4033
* We check the return of @fn each time. If it returns anything
4034
* other than 0, we break out and return that value.
4035
*/
4036
int device_for_each_child_reverse(struct device *parent, void *data,
4037
device_iter_t fn)
4038
{
4039
struct klist_iter i;
4040
struct device *child;
4041
int error = 0;
4042
4043
if (!parent || !parent->p)
4044
return 0;
4045
4046
klist_iter_init(&parent->p->klist_children, &i);
4047
while ((child = prev_device(&i)) && !error)
4048
error = fn(child, data);
4049
klist_iter_exit(&i);
4050
return error;
4051
}
4052
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
4053
4054
/**
4055
* device_for_each_child_reverse_from - device child iterator in reversed order.
4056
* @parent: parent struct device.
4057
* @from: optional starting point in child list
4058
* @fn: function to be called for each device.
4059
* @data: data for the callback.
4060
*
4061
* Iterate over @parent's child devices, starting at @from, and call @fn
4062
* for each, passing it @data. This helper is identical to
4063
* device_for_each_child_reverse() when @from is NULL.
4064
*
4065
* @fn is checked each iteration. If it returns anything other than 0,
4066
* iteration stops and that value is returned to the caller of
4067
* device_for_each_child_reverse_from().
4068
*/
4069
int device_for_each_child_reverse_from(struct device *parent,
4070
struct device *from, void *data,
4071
device_iter_t fn)
4072
{
4073
struct klist_iter i;
4074
struct device *child;
4075
int error = 0;
4076
4077
if (!parent || !parent->p)
4078
return 0;
4079
4080
klist_iter_init_node(&parent->p->klist_children, &i,
4081
(from ? &from->p->knode_parent : NULL));
4082
while ((child = prev_device(&i)) && !error)
4083
error = fn(child, data);
4084
klist_iter_exit(&i);
4085
return error;
4086
}
4087
EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
4088
4089
/**
4090
* device_find_child - device iterator for locating a particular device.
4091
* @parent: parent struct device
4092
* @match: Callback function to check device
4093
* @data: Data to pass to match function
4094
*
4095
* This is similar to the device_for_each_child() function above, but it
4096
* returns a reference to a device that is 'found' for later use, as
4097
* determined by the @match callback.
4098
*
4099
* The callback should return 0 if the device doesn't match and non-zero
4100
* if it does. If the callback returns non-zero and a reference to the
4101
* current device can be obtained, this function will return to the caller
4102
* and not iterate over any more devices.
4103
*
4104
* NOTE: you will need to drop the reference with put_device() after use.
4105
*/
4106
struct device *device_find_child(struct device *parent, const void *data,
4107
device_match_t match)
4108
{
4109
struct klist_iter i;
4110
struct device *child;
4111
4112
if (!parent || !parent->p)
4113
return NULL;
4114
4115
klist_iter_init(&parent->p->klist_children, &i);
4116
while ((child = next_device(&i))) {
4117
if (match(child, data)) {
4118
get_device(child);
4119
break;
4120
}
4121
}
4122
klist_iter_exit(&i);
4123
return child;
4124
}
4125
EXPORT_SYMBOL_GPL(device_find_child);
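/*
 * Illustrative sketch (editorial): looking up a child by name with the
 * device_match_name() helper defined later in this file. The reference
 * taken on a match must be dropped with put_device().
 */
#if 0
static bool example_has_child_named(struct device *parent, const char *name)
{
	struct device *child;

	child = device_find_child(parent, name, device_match_name);
	if (!child)
		return false;

	put_device(child);	/* balance the reference taken on the match */
	return true;
}
#endif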
4126
4127
int __init devices_init(void)
4128
{
4129
devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
4130
if (!devices_kset)
4131
return -ENOMEM;
4132
dev_kobj = kobject_create_and_add("dev", NULL);
4133
if (!dev_kobj)
4134
goto dev_kobj_err;
4135
sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
4136
if (!sysfs_dev_block_kobj)
4137
goto block_kobj_err;
4138
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
4139
if (!sysfs_dev_char_kobj)
4140
goto char_kobj_err;
4141
device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
4142
if (!device_link_wq)
4143
goto wq_err;
4144
4145
return 0;
4146
4147
wq_err:
4148
kobject_put(sysfs_dev_char_kobj);
4149
char_kobj_err:
4150
kobject_put(sysfs_dev_block_kobj);
4151
block_kobj_err:
4152
kobject_put(dev_kobj);
4153
dev_kobj_err:
4154
kset_unregister(devices_kset);
4155
return -ENOMEM;
4156
}
4157
4158
static int device_check_offline(struct device *dev, void *not_used)
4159
{
4160
int ret;
4161
4162
ret = device_for_each_child(dev, NULL, device_check_offline);
4163
if (ret)
4164
return ret;
4165
4166
return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
4167
}
4168
4169
/**
4170
* device_offline - Prepare the device for hot-removal.
4171
* @dev: Device to be put offline.
4172
*
4173
* Execute the device bus type's .offline() callback, if present, to prepare
4174
* the device for a subsequent hot-removal. If that succeeds, the device must
4175
* not be used until either it is removed or its bus type's .online() callback
4176
* is executed.
4177
*
4178
* Call under device_hotplug_lock.
4179
*/
4180
int device_offline(struct device *dev)
4181
{
4182
int ret;
4183
4184
if (dev->offline_disabled)
4185
return -EPERM;
4186
4187
ret = device_for_each_child(dev, NULL, device_check_offline);
4188
if (ret)
4189
return ret;
4190
4191
device_lock(dev);
4192
if (device_supports_offline(dev)) {
4193
if (dev->offline) {
4194
ret = 1;
4195
} else {
4196
ret = dev->bus->offline(dev);
4197
if (!ret) {
4198
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
4199
dev->offline = true;
4200
}
4201
}
4202
}
4203
device_unlock(dev);
4204
4205
return ret;
4206
}
4207
4208
/**
4209
* device_online - Put the device back online after successful device_offline().
4210
* @dev: Device to be put back online.
4211
*
4212
* If device_offline() has been successfully executed for @dev, but the device
4213
* has not been removed subsequently, execute its bus type's .online() callback
4214
* to indicate that the device can be used again.
4215
*
4216
* Call under device_hotplug_lock.
4217
*/
4218
int device_online(struct device *dev)
4219
{
4220
int ret = 0;
4221
4222
device_lock(dev);
4223
if (device_supports_offline(dev)) {
4224
if (dev->offline) {
4225
ret = dev->bus->online(dev);
4226
if (!ret) {
4227
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
4228
dev->offline = false;
4229
}
4230
} else {
4231
ret = 1;
4232
}
4233
}
4234
device_unlock(dev);
4235
4236
return ret;
4237
}
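/*
 * Illustrative sketch (editorial): both helpers above must be called under
 * device_hotplug_lock, via the lock_device_hotplug() helpers defined
 * earlier in this file. A return value of 1 means the device was already
 * in the requested state.
 */
#if 0
static int example_cycle_offline(struct device *dev)
{
	int ret;

	lock_device_hotplug();
	ret = device_offline(dev);
	if (ret == 0)
		ret = device_online(dev);
	unlock_device_hotplug();

	return ret < 0 ? ret : 0;
}
#endif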
4238
4239
struct root_device {
4240
struct device dev;
4241
struct module *owner;
4242
};
4243
4244
static inline struct root_device *to_root_device(struct device *d)
4245
{
4246
return container_of(d, struct root_device, dev);
4247
}
4248
4249
static void root_device_release(struct device *dev)
4250
{
4251
kfree(to_root_device(dev));
4252
}
4253
4254
/**
4255
* __root_device_register - allocate and register a root device
4256
* @name: root device name
4257
* @owner: owner module of the root device, usually THIS_MODULE
4258
*
4259
* This function allocates a root device and registers it
4260
* using device_register(). In order to free the returned
4261
* device, use root_device_unregister().
4262
*
4263
* Root devices are dummy devices which allow other devices
4264
* to be grouped under /sys/devices. Use this function to
4265
* allocate a root device and then use it as the parent of
4266
* any device which should appear under /sys/devices/{name}.
4267
*
4268
* The /sys/devices/{name} directory will also contain a
4269
* 'module' symlink which points to the @owner directory
4270
* in sysfs.
4271
*
4272
* Returns &struct device pointer on success, or ERR_PTR() on error.
4273
*
4274
* Note: You probably want to use root_device_register().
4275
*/
4276
struct device *__root_device_register(const char *name, struct module *owner)
4277
{
4278
struct root_device *root;
4279
int err = -ENOMEM;
4280
4281
root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
4282
if (!root)
4283
return ERR_PTR(err);
4284
4285
err = dev_set_name(&root->dev, "%s", name);
4286
if (err) {
4287
kfree(root);
4288
return ERR_PTR(err);
4289
}
4290
4291
root->dev.release = root_device_release;
4292
4293
err = device_register(&root->dev);
4294
if (err) {
4295
put_device(&root->dev);
4296
return ERR_PTR(err);
4297
}
4298
4299
#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
4300
if (owner) {
4301
struct module_kobject *mk = &owner->mkobj;
4302
4303
err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
4304
if (err) {
4305
device_unregister(&root->dev);
4306
return ERR_PTR(err);
4307
}
4308
root->owner = owner;
4309
}
4310
#endif
4311
4312
return &root->dev;
4313
}
4314
EXPORT_SYMBOL_GPL(__root_device_register);
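/*
 * Illustrative sketch (editorial): a module-lifetime root device used as a
 * parent so that its children appear under /sys/devices/example/. The
 * root_device_register() macro supplies THIS_MODULE as @owner.
 */
#if 0
static struct device *example_root;

static int __init example_init(void)
{
	example_root = root_device_register("example");
	return PTR_ERR_OR_ZERO(example_root);
}

static void __exit example_exit(void)
{
	root_device_unregister(example_root);
}
#endif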
4315
4316
/**
4317
* root_device_unregister - unregister and free a root device
4318
* @dev: device going away
4319
*
4320
* This function unregisters and cleans up a device that was created by
4321
* root_device_register().
4322
*/
4323
void root_device_unregister(struct device *dev)
4324
{
4325
struct root_device *root = to_root_device(dev);
4326
4327
if (root->owner)
4328
sysfs_remove_link(&root->dev.kobj, "module");
4329
4330
device_unregister(dev);
4331
}
4332
EXPORT_SYMBOL_GPL(root_device_unregister);
4333
4334
4335
static void device_create_release(struct device *dev)
4336
{
4337
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
4338
kfree(dev);
4339
}
4340
4341
static __printf(6, 0) struct device *
4342
device_create_groups_vargs(const struct class *class, struct device *parent,
4343
dev_t devt, void *drvdata,
4344
const struct attribute_group **groups,
4345
const char *fmt, va_list args)
4346
{
4347
struct device *dev = NULL;
4348
int retval = -ENODEV;
4349
4350
if (IS_ERR_OR_NULL(class))
4351
goto error;
4352
4353
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
4354
if (!dev) {
4355
retval = -ENOMEM;
4356
goto error;
4357
}
4358
4359
device_initialize(dev);
4360
dev->devt = devt;
4361
dev->class = class;
4362
dev->parent = parent;
4363
dev->groups = groups;
4364
dev->release = device_create_release;
4365
dev_set_drvdata(dev, drvdata);
4366
4367
retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
4368
if (retval)
4369
goto error;
4370
4371
retval = device_add(dev);
4372
if (retval)
4373
goto error;
4374
4375
return dev;
4376
4377
error:
4378
put_device(dev);
4379
return ERR_PTR(retval);
4380
}
4381
4382
/**
4383
* device_create - creates a device and registers it with sysfs
4384
* @class: pointer to the struct class that this device should be registered to
4385
* @parent: pointer to the parent struct device of this new device, if any
4386
* @devt: the dev_t for the char device to be added
4387
* @drvdata: the data to be added to the device for callbacks
4388
* @fmt: string for the device's name
4389
*
4390
* This function can be used by char device classes. A struct device
4391
* will be created in sysfs, registered to the specified class.
4392
*
4393
* A "dev" file will be created, showing the dev_t for the device, if
4394
* the dev_t is not 0,0.
4395
* If a pointer to a parent struct device is passed in, the newly created
4396
* struct device will be a child of that device in sysfs.
4397
* The pointer to the struct device will be returned from the call.
4398
* Any further sysfs files that might be required can be created using this
4399
* pointer.
4400
*
4401
* Returns &struct device pointer on success, or ERR_PTR() on error.
4402
*/
4403
struct device *device_create(const struct class *class, struct device *parent,
4404
dev_t devt, void *drvdata, const char *fmt, ...)
4405
{
4406
va_list vargs;
4407
struct device *dev;
4408
4409
va_start(vargs, fmt);
4410
dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
4411
fmt, vargs);
4412
va_end(vargs);
4413
return dev;
4414
}
4415
EXPORT_SYMBOL_GPL(device_create);
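/*
 * Illustrative sketch (editorial): a char-device class node created and
 * destroyed with the helpers in this file. example_class and example_devt
 * are assumed to have been set up elsewhere (e.g. via class_create() and
 * alloc_chrdev_region()).
 */
#if 0
static struct device *example_create_node(void)
{
	return device_create(example_class, NULL, example_devt,
			     NULL, "example%d", 0);
}

static void example_destroy_node(void)
{
	device_destroy(example_class, example_devt);
}
#endif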
4416
4417
/**
4418
* device_create_with_groups - creates a device and registers it with sysfs
4419
* @class: pointer to the struct class that this device should be registered to
4420
* @parent: pointer to the parent struct device of this new device, if any
4421
* @devt: the dev_t for the char device to be added
4422
* @drvdata: the data to be added to the device for callbacks
4423
* @groups: NULL-terminated list of attribute groups to be created
4424
* @fmt: string for the device's name
4425
*
4426
* This function can be used by char device classes. A struct device
4427
* will be created in sysfs, registered to the specified class.
4428
* Additional attributes specified in the groups parameter will also
4429
* be created automatically.
4430
*
4431
* A "dev" file will be created, showing the dev_t for the device, if
4432
* the dev_t is not 0,0.
4433
* If a pointer to a parent struct device is passed in, the newly created
4434
* struct device will be a child of that device in sysfs.
4435
* The pointer to the struct device will be returned from the call.
4436
* Any further sysfs files that might be required can be created using this
4437
* pointer.
4438
*
4439
* Returns &struct device pointer on success, or ERR_PTR() on error.
4440
*/
4441
struct device *device_create_with_groups(const struct class *class,
4442
struct device *parent, dev_t devt,
4443
void *drvdata,
4444
const struct attribute_group **groups,
4445
const char *fmt, ...)
4446
{
4447
va_list vargs;
4448
struct device *dev;
4449
4450
va_start(vargs, fmt);
4451
dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
4452
fmt, vargs);
4453
va_end(vargs);
4454
return dev;
4455
}
4456
EXPORT_SYMBOL_GPL(device_create_with_groups);
4457
4458
/**
4459
* device_destroy - removes a device that was created with device_create()
4460
* @class: pointer to the struct class that this device was registered with
4461
* @devt: the dev_t of the device that was previously registered
4462
*
4463
* This call unregisters and cleans up a device that was created with a
4464
* call to device_create().
4465
*/
4466
void device_destroy(const struct class *class, dev_t devt)
4467
{
4468
struct device *dev;
4469
4470
dev = class_find_device_by_devt(class, devt);
4471
if (dev) {
4472
put_device(dev);
4473
device_unregister(dev);
4474
}
4475
}
4476
EXPORT_SYMBOL_GPL(device_destroy);
4477
4478
/**
4479
* device_rename - renames a device
4480
* @dev: the pointer to the struct device to be renamed
4481
* @new_name: the new name of the device
4482
*
4483
* It is the responsibility of the caller to provide mutual
4484
* exclusion between two different calls of device_rename
4485
* on the same device to ensure that new_name is valid and
4486
* won't conflict with other devices.
4487
*
4488
* Note: given that some subsystems (networking and infiniband) use this
4489
* function, with no immediate plans for this to change, we cannot assume or
4490
* require that this function not be called at all.
4491
*
4492
* However, if you're writing new code, do not call this function. The following
4493
* text from Kay Sievers offers some insight:
4494
*
4495
* Renaming devices is racy at many levels, symlinks and other stuff are not
4496
* replaced atomically, and you get a "move" uevent, but it's not easy to
4497
* connect the event to the old and new device. Device nodes are not renamed at
4498
* all, there isn't even support for that in the kernel now.
4499
*
4500
* In the meantime, during renaming, your target name might be taken by another
4501
* driver, creating conflicts. Or the old name is taken directly after you
4502
* renamed it -- then you get events for the same DEVPATH, before you even see
4503
* the "move" event. It's just a mess, and nothing new should ever rely on
4504
* kernel device renaming. Besides that, it's not even implemented now for
4505
* other things than (driver-core wise very simple) network devices.
4506
*
4507
* Make up a "real" name in the driver before you register anything, or add
4508
* some other attributes for userspace to find the device, or use udev to add
4509
* symlinks -- but never rename kernel devices later, it's a complete mess. We
4510
* don't even want to get into that and try to implement the missing pieces in
4511
* the core. We really have other pieces to fix in the driver core mess. :)
4512
*/
4513
int device_rename(struct device *dev, const char *new_name)
4514
{
4515
struct subsys_private *sp = NULL;
4516
struct kobject *kobj = &dev->kobj;
4517
char *old_device_name = NULL;
4518
int error;
4519
bool is_link_renamed = false;
4520
4521
dev = get_device(dev);
4522
if (!dev)
4523
return -EINVAL;
4524
4525
dev_dbg(dev, "renaming to %s\n", new_name);
4526
4527
old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
4528
if (!old_device_name) {
4529
error = -ENOMEM;
4530
goto out;
4531
}
4532
4533
if (dev->class) {
4534
sp = class_to_subsys(dev->class);
4535
4536
if (!sp) {
4537
error = -EINVAL;
4538
goto out;
4539
}
4540
4541
error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name,
4542
new_name, kobject_namespace(kobj));
4543
if (error)
4544
goto out;
4545
4546
is_link_renamed = true;
4547
}
4548
4549
error = kobject_rename(kobj, new_name);
4550
out:
4551
if (error && is_link_renamed)
4552
sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name,
4553
old_device_name, kobject_namespace(kobj));
4554
subsys_put(sp);
4555
4556
put_device(dev);
4557
4558
kfree(old_device_name);
4559
4560
return error;
4561
}
4562
EXPORT_SYMBOL_GPL(device_rename);
4563
4564
static int device_move_class_links(struct device *dev,
4565
struct device *old_parent,
4566
struct device *new_parent)
4567
{
4568
int error = 0;
4569
4570
if (old_parent)
4571
sysfs_remove_link(&dev->kobj, "device");
4572
if (new_parent)
4573
error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
4574
"device");
4575
return error;
4576
}
4577
4578
/**
4579
* device_move - moves a device to a new parent
4580
* @dev: the pointer to the struct device to be moved
4581
* @new_parent: the new parent of the device (can be NULL)
4582
* @dpm_order: how to reorder the dpm_list
4583
*/
4584
int device_move(struct device *dev, struct device *new_parent,
4585
enum dpm_order dpm_order)
4586
{
4587
int error;
4588
struct device *old_parent;
4589
struct kobject *new_parent_kobj;
4590
4591
dev = get_device(dev);
4592
if (!dev)
4593
return -EINVAL;
4594
4595
device_pm_lock();
4596
new_parent = get_device(new_parent);
4597
new_parent_kobj = get_device_parent(dev, new_parent);
4598
if (IS_ERR(new_parent_kobj)) {
4599
error = PTR_ERR(new_parent_kobj);
4600
put_device(new_parent);
4601
goto out;
4602
}
4603
4604
pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
4605
__func__, new_parent ? dev_name(new_parent) : "<NULL>");
4606
error = kobject_move(&dev->kobj, new_parent_kobj);
4607
if (error) {
4608
cleanup_glue_dir(dev, new_parent_kobj);
4609
put_device(new_parent);
4610
goto out;
4611
}
4612
old_parent = dev->parent;
4613
dev->parent = new_parent;
4614
if (old_parent)
4615
klist_remove(&dev->p->knode_parent);
4616
if (new_parent) {
4617
klist_add_tail(&dev->p->knode_parent,
4618
&new_parent->p->klist_children);
4619
set_dev_node(dev, dev_to_node(new_parent));
4620
}
4621
4622
if (dev->class) {
4623
error = device_move_class_links(dev, old_parent, new_parent);
4624
if (error) {
4625
/* We ignore errors on cleanup since we're hosed anyway... */
4626
device_move_class_links(dev, new_parent, old_parent);
4627
if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
4628
if (new_parent)
4629
klist_remove(&dev->p->knode_parent);
4630
dev->parent = old_parent;
4631
if (old_parent) {
4632
klist_add_tail(&dev->p->knode_parent,
4633
&old_parent->p->klist_children);
4634
set_dev_node(dev, dev_to_node(old_parent));
4635
}
4636
}
4637
cleanup_glue_dir(dev, new_parent_kobj);
4638
put_device(new_parent);
4639
goto out;
4640
}
4641
}
4642
switch (dpm_order) {
4643
case DPM_ORDER_NONE:
4644
break;
4645
case DPM_ORDER_DEV_AFTER_PARENT:
4646
device_pm_move_after(dev, new_parent);
4647
devices_kset_move_after(dev, new_parent);
4648
break;
4649
case DPM_ORDER_PARENT_BEFORE_DEV:
4650
device_pm_move_before(new_parent, dev);
4651
devices_kset_move_before(new_parent, dev);
4652
break;
4653
case DPM_ORDER_DEV_LAST:
4654
device_pm_move_last(dev);
4655
devices_kset_move_last(dev);
4656
break;
4657
}
4658
4659
put_device(old_parent);
4660
out:
4661
device_pm_unlock();
4662
put_device(dev);
4663
return error;
4664
}
4665
EXPORT_SYMBOL_GPL(device_move);
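/*
 * Illustrative sketch (editorial): re-parenting a device while keeping the
 * suspend/resume ordering sane by placing the child after its new parent
 * in the dpm_list.
 */
#if 0
static int example_reparent(struct device *dev, struct device *new_parent)
{
	return device_move(dev, new_parent, DPM_ORDER_DEV_AFTER_PARENT);
}
#endif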
4666
4667
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
4668
kgid_t kgid)
4669
{
4670
struct kobject *kobj = &dev->kobj;
4671
const struct class *class = dev->class;
4672
const struct device_type *type = dev->type;
4673
int error;
4674
4675
if (class) {
4676
/*
4677
* Change the device groups of the device class for @dev to
4678
* @kuid/@kgid.
4679
*/
4680
error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
4681
kgid);
4682
if (error)
4683
return error;
4684
}
4685
4686
if (type) {
4687
/*
4688
* Change the device groups of the device type for @dev to
4689
* @kuid/@kgid.
4690
*/
4691
error = sysfs_groups_change_owner(kobj, type->groups, kuid,
4692
kgid);
4693
if (error)
4694
return error;
4695
}
4696
4697
/* Change the device groups of @dev to @kuid/@kgid. */
4698
error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
4699
if (error)
4700
return error;
4701
4702
if (device_supports_offline(dev) && !dev->offline_disabled) {
4703
/* Change online device attributes of @dev to @kuid/@kgid. */
4704
error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
4705
kuid, kgid);
4706
if (error)
4707
return error;
4708
}
4709
4710
return 0;
4711
}
4712
4713
/**
4714
* device_change_owner - change the owner of an existing device.
4715
* @dev: device.
4716
* @kuid: new owner's kuid
4717
* @kgid: new owner's kgid
4718
*
4719
* This changes the owner of @dev and its corresponding sysfs entries to
4720
* @kuid/@kgid. This function closely mirrors how @dev was added via driver
4721
* core.
4722
*
4723
* Returns 0 on success or error code on failure.
4724
*/
4725
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
4726
{
4727
int error;
4728
struct kobject *kobj = &dev->kobj;
4729
struct subsys_private *sp;
4730
4731
dev = get_device(dev);
4732
if (!dev)
4733
return -EINVAL;
4734
4735
/*
4736
* Change the kobject and the default attributes and groups of the
4737
* ktype associated with it to @kuid/@kgid.
4738
*/
4739
error = sysfs_change_owner(kobj, kuid, kgid);
4740
if (error)
4741
goto out;
4742
4743
/*
4744
* Change the uevent file for @dev to the new owner. The uevent file
4745
* was created in a separate step when @dev got added and we mirror
4746
* that step here.
4747
*/
4748
error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
4749
kgid);
4750
if (error)
4751
goto out;
4752
4753
/*
4754
* Change the device groups, the device groups associated with the
4755
* device class, and the groups associated with the device type of @dev
4756
* to @kuid/@kgid.
4757
*/
4758
error = device_attrs_change_owner(dev, kuid, kgid);
4759
if (error)
4760
goto out;
4761
4762
error = dpm_sysfs_change_owner(dev, kuid, kgid);
4763
if (error)
4764
goto out;
4765
4766
/*
4767
* Change the owner of the symlink located in the class directory of
4768
* the device class associated with @dev which points to the actual
4769
* directory entry for @dev to @kuid/@kgid. This ensures that the
4770
* symlink shows the same permissions as its target.
4771
*/
4772
sp = class_to_subsys(dev->class);
4773
if (!sp) {
4774
error = -EINVAL;
4775
goto out;
4776
}
4777
error = sysfs_link_change_owner(&sp->subsys.kobj, &dev->kobj, dev_name(dev), kuid, kgid);
4778
subsys_put(sp);
4779
4780
out:
4781
put_device(dev);
4782
return error;
4783
}
4784
EXPORT_SYMBOL_GPL(device_change_owner);
4785
4786
/**
4787
* device_shutdown - call ->shutdown() on each device to shutdown.
4788
*/
4789
void device_shutdown(void)
4790
{
4791
struct device *dev, *parent;
4792
4793
wait_for_device_probe();
4794
device_block_probing();
4795
4796
cpufreq_suspend();
4797
4798
spin_lock(&devices_kset->list_lock);
4799
/*
4800
* Walk the devices list backward, shutting down each in turn.
4801
* Beware that device unplug events may also start pulling
4802
* devices offline, even as the system is shutting down.
4803
*/
4804
while (!list_empty(&devices_kset->list)) {
4805
dev = list_entry(devices_kset->list.prev, struct device,
4806
kobj.entry);
4807
4808
/*
4809
* Hold a reference on the device's parent so that it
4811
* cannot be freed while we still need to take and
4812
* hold the parent's lock below.
4812
*/
4813
parent = get_device(dev->parent);
4814
get_device(dev);
4815
/*
4816
* Make sure the device is off the kset list, in the
4817
* event that dev->*->shutdown() doesn't remove it.
4818
*/
4819
list_del_init(&dev->kobj.entry);
4820
spin_unlock(&devices_kset->list_lock);
4821
4822
/* hold lock to avoid race with probe/release */
4823
if (parent)
4824
device_lock(parent);
4825
device_lock(dev);
4826
4827
/* Don't allow any more runtime suspends */
4828
pm_runtime_get_noresume(dev);
4829
pm_runtime_barrier(dev);
4830
4831
if (dev->class && dev->class->shutdown_pre) {
4832
if (initcall_debug)
4833
dev_info(dev, "shutdown_pre\n");
4834
dev->class->shutdown_pre(dev);
4835
}
4836
if (dev->bus && dev->bus->shutdown) {
4837
if (initcall_debug)
4838
dev_info(dev, "shutdown\n");
4839
dev->bus->shutdown(dev);
4840
} else if (dev->driver && dev->driver->shutdown) {
4841
if (initcall_debug)
4842
dev_info(dev, "shutdown\n");
4843
dev->driver->shutdown(dev);
4844
}
4845
4846
device_unlock(dev);
4847
if (parent)
4848
device_unlock(parent);
4849
4850
put_device(dev);
4851
put_device(parent);
4852
4853
spin_lock(&devices_kset->list_lock);
4854
}
4855
spin_unlock(&devices_kset->list_lock);
4856
}
4857
4858
/*
4859
* Device logging functions
4860
*/
4861
4862
#ifdef CONFIG_PRINTK
4863
static void
4864
set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
4865
{
4866
const char *subsys;
4867
4868
memset(dev_info, 0, sizeof(*dev_info));
4869
4870
if (dev->class)
4871
subsys = dev->class->name;
4872
else if (dev->bus)
4873
subsys = dev->bus->name;
4874
else
4875
return;
4876
4877
strscpy(dev_info->subsystem, subsys);
4878
4879
/*
4880
* Add device identifier DEVICE=:
4881
* b12:8 block dev_t
4882
* c127:3 char dev_t
4883
* n8 netdev ifindex
4884
* +sound:card0 subsystem:devname
4885
*/
4886
if (MAJOR(dev->devt)) {
4887
char c;
4888
4889
if (strcmp(subsys, "block") == 0)
4890
c = 'b';
4891
else
4892
c = 'c';
4893
4894
snprintf(dev_info->device, sizeof(dev_info->device),
4895
"%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
4896
} else if (strcmp(subsys, "net") == 0) {
4897
struct net_device *net = to_net_dev(dev);
4898
4899
snprintf(dev_info->device, sizeof(dev_info->device),
4900
"n%u", net->ifindex);
4901
} else {
4902
snprintf(dev_info->device, sizeof(dev_info->device),
4903
"+%s:%s", subsys, dev_name(dev));
4904
}
4905
}
4906
4907
int dev_vprintk_emit(int level, const struct device *dev,
4908
const char *fmt, va_list args)
4909
{
4910
struct dev_printk_info dev_info;
4911
4912
set_dev_info(dev, &dev_info);
4913
4914
return vprintk_emit(0, level, &dev_info, fmt, args);
4915
}
4916
EXPORT_SYMBOL(dev_vprintk_emit);
4917
4918
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
4919
{
4920
va_list args;
4921
int r;
4922
4923
va_start(args, fmt);
4924
4925
r = dev_vprintk_emit(level, dev, fmt, args);
4926
4927
va_end(args);
4928
4929
return r;
4930
}
4931
EXPORT_SYMBOL(dev_printk_emit);
4932
4933
static void __dev_printk(const char *level, const struct device *dev,
4934
struct va_format *vaf)
4935
{
4936
if (dev)
4937
dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
4938
dev_driver_string(dev), dev_name(dev), vaf);
4939
else
4940
printk("%s(NULL device *): %pV", level, vaf);
4941
}
4942
4943
void _dev_printk(const char *level, const struct device *dev,
4944
const char *fmt, ...)
4945
{
4946
struct va_format vaf;
4947
va_list args;
4948
4949
va_start(args, fmt);
4950
4951
vaf.fmt = fmt;
4952
vaf.va = &args;
4953
4954
__dev_printk(level, dev, &vaf);
4955
4956
va_end(args);
4957
}
4958
EXPORT_SYMBOL(_dev_printk);
4959
4960
#define define_dev_printk_level(func, kern_level) \
4961
void func(const struct device *dev, const char *fmt, ...) \
4962
{ \
4963
struct va_format vaf; \
4964
va_list args; \
4965
\
4966
va_start(args, fmt); \
4967
\
4968
vaf.fmt = fmt; \
4969
vaf.va = &args; \
4970
\
4971
__dev_printk(kern_level, dev, &vaf); \
4972
\
4973
va_end(args); \
4974
} \
4975
EXPORT_SYMBOL(func);
4976
4977
define_dev_printk_level(_dev_emerg, KERN_EMERG);
4978
define_dev_printk_level(_dev_alert, KERN_ALERT);
4979
define_dev_printk_level(_dev_crit, KERN_CRIT);
4980
define_dev_printk_level(_dev_err, KERN_ERR);
4981
define_dev_printk_level(_dev_warn, KERN_WARNING);
4982
define_dev_printk_level(_dev_notice, KERN_NOTICE);
4983
define_dev_printk_level(_dev_info, KERN_INFO);
4984
4985
#endif
4986
4987
static void __dev_probe_failed(const struct device *dev, int err, bool fatal,
4988
const char *fmt, va_list vargsp)
4989
{
4990
struct va_format vaf;
4991
va_list vargs;
4992
4993
/*
4994
* On x86_64 and possibly on other architectures, va_list is actually a
4995
* size-1 array containing a structure. As a result, function parameter
4996
* vargsp decays from T[1] to T*, and &vargsp has type T** rather than
4997
* T(*)[1], which is expected by its assignment to vaf.va below.
4998
*
4999
* One standard way to solve this mess is by creating a copy in a local
5000
* variable of type va_list and then using a pointer to that local copy
5001
* instead, which is the approach employed here.
5002
*/
5003
va_copy(vargs, vargsp);
5004
5005
vaf.fmt = fmt;
5006
vaf.va = &vargs;
5007
5008
switch (err) {
5009
case -EPROBE_DEFER:
5010
device_set_deferred_probe_reason(dev, &vaf);
5011
dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
5012
break;
5013
5014
case -ENOMEM:
5015
/* Don't print anything on -ENOMEM, there's already enough output */
5016
break;
5017
5018
default:
5019
/* Log fatal final failures as errors, otherwise produce warnings */
5020
if (fatal)
5021
dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
5022
else
5023
dev_warn(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
5024
break;
5025
}
5026
5027
va_end(vargs);
5028
}
5029
5030
/**
5031
* dev_err_probe - probe error check and log helper
5032
* @dev: the pointer to the struct device
5033
* @err: error value to test
5034
* @fmt: printf-style format string
5035
* @...: arguments as specified in the format string
5036
*
5037
* This helper implements a common pattern present in probe functions for
5038
* error checking: print a debug or error message depending on whether the
5039
* error value is -EPROBE_DEFER, and propagate the error upwards.
5040
* In case of -EPROBE_DEFER it also sets the defer probe reason, which can
5041
* be checked later by reading the devices_deferred debugfs attribute.
5042
* It replaces the following code sequence::
5043
*
5044
* if (err != -EPROBE_DEFER)
5045
* dev_err(dev, ...);
5046
* else
5047
* dev_dbg(dev, ...);
5048
* return err;
5049
*
5050
* with::
5051
*
5052
* return dev_err_probe(dev, err, ...);
5053
*
5054
* Using this helper in your probe function is totally fine even if @err
5055
* is known to never be -EPROBE_DEFER.
5056
* The benefit compared to a normal dev_err() is the standardized format
5057
* of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
5058
* instead of "-35"), and having the error code returned allows more
5059
* compact error paths.
5060
*
5061
* Returns @err.
5062
*/
5063
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
5064
{
5065
va_list vargs;
5066
5067
va_start(vargs, fmt);
5068
5069
/* Use dev_err() for logging when err doesn't equal -EPROBE_DEFER */
5070
__dev_probe_failed(dev, err, true, fmt, vargs);
5071
5072
va_end(vargs);
5073
5074
return err;
5075
}
5076
EXPORT_SYMBOL_GPL(dev_err_probe);
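/*
 * Illustrative sketch (editorial): the compact probe error path this
 * helper enables. Assumes the usual platform-device and clk headers; the
 * driver names are hypothetical.
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	return 0;
}
#endif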
5077
5078
/**
5079
* dev_warn_probe - probe error check and log helper
5080
* @dev: the pointer to the struct device
5081
* @err: error value to test
5082
* @fmt: printf-style format string
5083
* @...: arguments as specified in the format string
5084
*
5085
* This helper implements a common pattern present in probe functions for
5086
* error checking: print a debug or warning message depending on whether the
5087
* error value is -EPROBE_DEFER, and propagate the error upwards.
5088
* In case of -EPROBE_DEFER it also sets the defer probe reason, which can
5089
* be checked later by reading the devices_deferred debugfs attribute.
5090
* It replaces the following code sequence::
5091
*
5092
* if (err != -EPROBE_DEFER)
5093
* dev_warn(dev, ...);
5094
* else
5095
* dev_dbg(dev, ...);
5096
* return err;
5097
*
5098
* with::
5099
*
5100
* return dev_warn_probe(dev, err, ...);
5101
*
5102
* Using this helper in your probe function is totally fine even if @err
5103
* is known to never be -EPROBE_DEFER.
5104
* The benefit compared to a normal dev_warn() is the standardized format
5105
* of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
5106
* instead of "-35"), and having the error code returned allows more
5107
* compact error paths.
5108
*
5109
* Returns @err.
5110
*/
5111
int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...)
5112
{
5113
va_list vargs;
5114
5115
va_start(vargs, fmt);
5116
5117
/* Use dev_warn() for logging when err doesn't equal -EPROBE_DEFER */
5118
__dev_probe_failed(dev, err, false, fmt, vargs);
5119
5120
va_end(vargs);
5121
5122
return err;
5123
}
5124
EXPORT_SYMBOL_GPL(dev_warn_probe);
5125
5126
static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
5127
{
5128
return fwnode && !IS_ERR(fwnode->secondary);
5129
}
5130
5131
/**
5132
* set_primary_fwnode - Change the primary firmware node of a given device.
5133
* @dev: Device to handle.
5134
* @fwnode: New primary firmware node of the device.
5135
*
5136
* Set the device's firmware node pointer to @fwnode, but if a secondary
5137
* firmware node of the device is present, preserve it.
5138
*
5139
* Valid fwnode cases are:
5140
* - primary --> secondary --> -ENODEV
5141
* - primary --> NULL
5142
* - secondary --> -ENODEV
5143
* - NULL
5144
*/
5145
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
5146
{
5147
struct device *parent = dev->parent;
5148
struct fwnode_handle *fn = dev->fwnode;
5149
5150
if (fwnode) {
5151
if (fwnode_is_primary(fn))
5152
fn = fn->secondary;
5153
5154
if (fn) {
5155
WARN_ON(fwnode->secondary);
5156
fwnode->secondary = fn;
5157
}
5158
dev->fwnode = fwnode;
5159
} else {
5160
if (fwnode_is_primary(fn)) {
5161
dev->fwnode = fn->secondary;
5162
5163
/* Skip nullifying fn->secondary if the primary is shared */
5164
if (parent && fn == parent->fwnode)
5165
return;
5166
5167
/* Set fn->secondary = NULL, so fn remains the primary fwnode */
5168
fn->secondary = NULL;
5169
} else {
5170
dev->fwnode = NULL;
5171
}
5172
}
5173
}
5174
EXPORT_SYMBOL_GPL(set_primary_fwnode);
5175
5176
/**
5177
* set_secondary_fwnode - Change the secondary firmware node of a given device.
5178
* @dev: Device to handle.
5179
* @fwnode: New secondary firmware node of the device.
5180
*
5181
* If a primary firmware node of the device is present, set its secondary
5182
* pointer to @fwnode. Otherwise, set the device's firmware node pointer to
5183
* @fwnode.
5184
*/
5185
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
5186
{
5187
if (fwnode)
5188
fwnode->secondary = ERR_PTR(-ENODEV);
5189
5190
if (fwnode_is_primary(dev->fwnode))
5191
dev->fwnode->secondary = fwnode;
5192
else
5193
dev->fwnode = fwnode;
5194
}
5195
EXPORT_SYMBOL_GPL(set_secondary_fwnode);
5196
5197
/**
5198
* device_remove_of_node - Remove an of_node from a device
5199
* @dev: device whose device tree node is being removed
5200
*/
5201
void device_remove_of_node(struct device *dev)
5202
{
5203
dev = get_device(dev);
5204
if (!dev)
5205
return;
5206
5207
if (!dev->of_node)
5208
goto end;
5209
5210
if (dev->fwnode == of_fwnode_handle(dev->of_node))
5211
dev->fwnode = NULL;
5212
5213
of_node_put(dev->of_node);
5214
dev->of_node = NULL;
5215
5216
end:
5217
put_device(dev);
5218
}
5219
EXPORT_SYMBOL_GPL(device_remove_of_node);
5220
5221
/**
5222
* device_add_of_node - Add an of_node to an existing device
5223
* @dev: device whose device tree node is being added
5224
* @of_node: of_node to add
5225
*
5226
* Return: 0 on success or error code on failure.
5227
*/
5228
int device_add_of_node(struct device *dev, struct device_node *of_node)
5229
{
5230
int ret;
5231
5232
if (!of_node)
5233
return -EINVAL;
5234
5235
dev = get_device(dev);
5236
if (!dev)
5237
return -EINVAL;
5238
5239
if (dev->of_node) {
5240
dev_err(dev, "Cannot replace node %pOF with %pOF\n",
5241
dev->of_node, of_node);
5242
ret = -EBUSY;
5243
goto end;
5244
}
5245
5246
dev->of_node = of_node_get(of_node);
5247
5248
if (!dev->fwnode)
5249
dev->fwnode = of_fwnode_handle(of_node);
5250
5251
ret = 0;
5252
end:
5253
put_device(dev);
5254
return ret;
5255
}
5256
EXPORT_SYMBOL_GPL(device_add_of_node);
5257
5258
/**
5259
* device_set_of_node_from_dev - reuse device-tree node of another device
5260
* @dev: device whose device-tree node is being set
5261
* @dev2: device whose device-tree node is being reused
5262
*
5263
* Takes another reference to the new device-tree node after first dropping
5264
* any reference held to the old node.
5265
*/
5266
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
5267
{
5268
of_node_put(dev->of_node);
5269
dev->of_node = of_node_get(dev2->of_node);
5270
dev->of_node_reused = true;
5271
}
5272
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
5273
5274
void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
5275
{
5276
dev->fwnode = fwnode;
5277
dev->of_node = to_of_node(fwnode);
5278
}
5279
EXPORT_SYMBOL_GPL(device_set_node);
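/*
 * Illustrative sketch (editorial): handing a parent's firmware node to a
 * newly initialized child so that device property lookups on the child
 * resolve against the parent's node. Names are hypothetical.
 */
#if 0
static void example_adopt_fwnode(struct device *child, struct device *parent)
{
	device_set_node(child, dev_fwnode(parent));
}
#endif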
5280
5281
struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode)
5282
{
5283
return get_device((fwnode)->dev);
5284
}
5285
EXPORT_SYMBOL_GPL(get_dev_from_fwnode);
5286
5287
int device_match_name(struct device *dev, const void *name)
5288
{
5289
return sysfs_streq(dev_name(dev), name);
5290
}
5291
EXPORT_SYMBOL_GPL(device_match_name);
5292
5293
int device_match_type(struct device *dev, const void *type)
5294
{
5295
return dev->type == type;
5296
}
5297
EXPORT_SYMBOL_GPL(device_match_type);
5298
5299
int device_match_of_node(struct device *dev, const void *np)
5300
{
5301
return np && dev->of_node == np;
5302
}
5303
EXPORT_SYMBOL_GPL(device_match_of_node);
5304
5305
int device_match_fwnode(struct device *dev, const void *fwnode)
5306
{
5307
return fwnode && dev_fwnode(dev) == fwnode;
5308
}
5309
EXPORT_SYMBOL_GPL(device_match_fwnode);
5310
5311
int device_match_devt(struct device *dev, const void *pdevt)
5312
{
5313
return dev->devt == *(dev_t *)pdevt;
5314
}
5315
EXPORT_SYMBOL_GPL(device_match_devt);
5316
5317
int device_match_acpi_dev(struct device *dev, const void *adev)
5318
{
5319
return adev && ACPI_COMPANION(dev) == adev;
5320
}
5321
EXPORT_SYMBOL(device_match_acpi_dev);
5322
5323
int device_match_acpi_handle(struct device *dev, const void *handle)
5324
{
5325
return handle && ACPI_HANDLE(dev) == handle;
5326
}
5327
EXPORT_SYMBOL(device_match_acpi_handle);
5328
5329
int device_match_any(struct device *dev, const void *unused)
5330
{
5331
return 1;
5332
}
5333
EXPORT_SYMBOL_GPL(device_match_any);
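/*
 * Illustrative sketch (editorial): the match helpers above are meant to be
 * passed to lookup functions such as bus_find_device(); the caller must
 * put_device() a non-NULL result.
 */
#if 0
static struct device *example_dev_from_of(struct device_node *np)
{
	return bus_find_device(&platform_bus_type, NULL, np,
			       device_match_of_node);
}
#endif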
5334
5335