// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <[email protected]>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <[email protected]>
 */
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/rculist.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
{
	return *(pm_callback_t *)(start + offset);
}

static pm_callback_t __rpm_get_driver_callback(struct device *dev,
					       size_t cb_offset)
{
	if (dev->driver && dev->driver->pm)
		return get_callback_ptr(dev->driver->pm, cb_offset);

	return NULL;
}

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	const struct dev_pm_ops *ops;
	pm_callback_t cb = NULL;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = get_callback_ptr(ops, cb_offset);

	if (!cb)
		cb = __rpm_get_driver_callback(dev, cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	trace_rpm_status(dev, status);
	dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = suspended ? dev->power.suspended_time : dev->power.active_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
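
/*
 * Example (editor's illustration, not part of this file): the expiration
 * computed above is driven entirely by power.last_busy, which drivers
 * refresh whenever the device does useful work:
 *
 *	pm_runtime_mark_last_busy(dev);	// autosuspend delay restarts from now
 *
 * With an autosuspend delay of, say, 100 ms, the device becomes eligible
 * for suspend 100 ms after the last such call.
 */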

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network device
 * drivers, to solve the deadlock problem during runtime resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or the
 * block device itself), the deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  This
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI-type situations.
 *
 * The dev_hotplug_mutex lock is held in this function to handle the
 * hotplug race, because pm_runtime_set_memalloc_noio() may be called
 * during an asynchronous probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (a block or network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable the ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if none of its
		 * children set it, because an ancestor's flag may have been
		 * set by any one of its descendants.
		 */
		if (!dev || (!enable &&
		    device_for_each_child(dev, NULL, dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
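
/*
 * Example (editor's illustration, not part of this file): a block device
 * driver would typically set the flag right after registering the device
 * and clear it again before deletion.  The "foo" names are hypothetical.
 *
 *	device_add(&foo->dev);
 *	pm_runtime_set_memalloc_noio(&foo->dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(&foo->dev, false);
 *	device_del(&foo->dev);
 */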

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count))
		retval = -EAGAIN;
	else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume &&
	    dev->power.runtime_status == RPM_SUSPENDING) ||
	    (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_resume_latency(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		int retval;

		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

/**
 * pm_runtime_release_supplier - Drop references to device link's supplier.
 * @link: Target device link.
 *
 * Drop all runtime PM references associated with @link to its supplier device.
 */
void pm_runtime_release_supplier(struct device_link *link)
{
	struct device *supplier = link->supplier;

	/*
	 * The additional power.usage_count check is a safety net in case
	 * the rpm_active refcount becomes saturated, in which case
	 * refcount_dec_not_one() would return true forever, but it is not
	 * strictly necessary.
	 */
	while (refcount_dec_not_one(&link->rpm_active) &&
	       atomic_read(&supplier->power.usage_count) > 0)
		pm_runtime_put_noidle(supplier);
}

static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		pm_runtime_release_supplier(link);
		if (try_to_suspend)
			pm_request_idle(link->supplier);
	}
}

static void rpm_put_suppliers(struct device *dev)
{
	__rpm_put_suppliers(dev, true);
}

static void rpm_suspend_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		pm_request_idle(link->supplier);

	device_links_read_unlock(idx);
}
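
/*
 * Example (editor's illustration, not part of this file): the supplier
 * handling above only applies to device links created with the
 * DL_FLAG_PM_RUNTIME flag, e.g.:
 *
 *	link = device_link_add(consumer_dev, supplier_dev,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
 *
 * With such a link in place, runtime-resuming the consumer takes and holds
 * runtime PM references on the supplier, which are dropped again when the
 * consumer suspends.
 */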

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval) {
				rpm_put_suppliers(dev);
				goto fail;
			}

			device_links_read_unlock(idx);
		}
	}

	if (cb)
		retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links &&
		    ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
		    (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

			__rpm_put_suppliers(dev, false);

fail:
			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one of a block device's
		 * ancestors or of the block device itself.  A network
		 * device may be thought of as part of an iSCSI block
		 * device, so network devices and their ancestors should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	/*
	 * Since -EACCES means that runtime PM is disabled for the given device,
	 * it should not be returned by runtime PM callbacks.  If it is returned
	 * nevertheless, assume it to be a transient error and convert it to
	 * -EAGAIN.
	 */
	if (retval == -EACCES)
		retval = -EAGAIN;

	if (retval != -EAGAIN && retval != -EBUSY)
		dev->power.runtime_error = retval;

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;

	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	/* If no callback assume success. */
	if (!callback || dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = callback(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
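
/*
 * Example (editor's illustration, not part of this file): a driver's
 * ->runtime_idle() callback can veto the autosuspend that rpm_idle() would
 * otherwise trigger by returning a nonzero value.  The "foo" names are
 * hypothetical.
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		return foo->dma_busy ? -EBUSY : 0;  // 0 lets the suspend proceed
 *	}
 */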

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend succeeded and a deferred resume was requested while
 * the callback was running, carry it out; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are unset).
 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

repeat:
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		goto out;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;

	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
			    dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						    (NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						       ns_to_ktime(expires),
						       slack,
						       HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

	dev_pm_enable_wake_irq_complete(dev);

no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (dev->power.irq_safe)
		goto out;

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}
	/* Maybe the suppliers are now able to suspend. */
	if (dev->power.links_count > 0) {
		spin_unlock_irq(&dev->power.lock);

		rpm_suspend_suppliers(dev);

		spin_lock_irq(&dev->power.lock);
	}

out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

fail:
	dev_pm_disable_wake_irq_check(dev, true);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	/*
	 * On transient errors, if the callback routine failed an autosuspend,
	 * and if the last_busy time has been updated so that there is a new
	 * autosuspend expiration time, automatically reschedule another
	 * autosuspend.
	 */
	if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
	    pm_runtime_autosuspend_expiration(dev) != 0)
		goto repeat;

	pm_runtime_cancel_pending(dev);

	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
	} else if (dev->power.disable_depth > 0) {
		if (dev->power.runtime_status == RPM_ACTIVE &&
		    dev->power.last_status == RPM_ACTIVE)
			retval = 1;
		else
			retval = -EACCES;
	}
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING ||
	    dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING) {
				dev->power.deferred_resume = true;
				if (rpmflags & RPM_NOWAIT)
					retval = -EINPROGRESS;
			} else {
				retval = -EINPROGRESS;
			}
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING &&
			    dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0 ||
		    dev->parent->power.ignore_children ||
		    dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;

		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth &&
		    !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;

		goto repeat;
	}
skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early.
	 */
	if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

static int rpm_drop_usage_count(struct device *dev)
{
	int ret;

	ret = atomic_sub_return(1, &dev->power.usage_count);
	if (ret >= 0)
		return ret;

	/*
	 * Because rpm_resume() does not check the usage counter, it will resume
	 * the device even if the usage counter is 0 or negative, so it is
	 * sufficient to increment the usage counter here to reverse the change
	 * made above.
	 */
	atomic_inc(&dev->power.usage_count);
	dev_warn(dev, "Runtime PM usage count underflow!\n");
	return -EINVAL;
}

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
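
/*
 * Example (editor's illustration, not part of this file): drivers normally
 * reach __pm_runtime_suspend()/__pm_runtime_resume() through the inline
 * helpers from <linux/pm_runtime.h>.  A typical I/O path looks like this
 * ("foo" names are hypothetical):
 *
 *	static int foo_do_transfer(struct foo_dev *foo)
 *	{
 *		int ret;
 *
 *		ret = pm_runtime_resume_and_get(foo->dev);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = foo_hw_transfer(foo);
 *
 *		pm_runtime_mark_last_busy(foo->dev);
 *		pm_runtime_put_autosuspend(foo->dev);
 *		return ret;
 *	}
 */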

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
		       dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

/**
 * pm_runtime_get_conditional - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
 * is set, or (2) @dev is not ignoring children and its active child count is
 * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
 * the usage counter of @dev and return 1.
 *
 * Otherwise, return 0 without changing the usage counter.
 *
 * If @ign_usage_count is %true, this function can be used to prevent suspending
 * the device when its runtime PM status is %RPM_ACTIVE.
 *
 * If @ign_usage_count is %false, this function can be used to prevent
 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
 * runtime PM usage counter is not zero.
 *
 * The caller is responsible for decrementing the runtime PM usage counter of
 * @dev after this function has returned a positive value for it.
 */
static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev->power.disable_depth > 0) {
		retval = -EINVAL;
	} else if (dev->power.runtime_status != RPM_ACTIVE) {
		retval = 0;
	} else if (ign_usage_count || (!dev->power.ignore_children &&
		   atomic_read(&dev->power.child_count) > 0)) {
		retval = 1;
		atomic_inc(&dev->power.usage_count);
	} else {
		retval = atomic_inc_not_zero(&dev->power.usage_count);
	}
	trace_rpm_usage(dev, 0);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}

/**
 * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
 *			      in active state
 * @dev: Target device.
 *
 * Increment the runtime PM usage counter of @dev if its runtime PM status is
 * %RPM_ACTIVE, in which case it returns 1.  If the device is in a different
 * state, 0 is returned.  -EINVAL is returned if runtime PM is disabled for the
 * device, in which case also the usage_count will remain unmodified.
 */
int pm_runtime_get_if_active(struct device *dev)
{
	return pm_runtime_get_conditional(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);

/**
 * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
 * @dev: Target device.
 *
 * Increment the runtime PM usage counter of @dev if its runtime PM status is
 * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
 * ignoring children and its active child count is nonzero.  1 is returned in
 * this case.
 *
 * If @dev is in a different state or it is not in use (that is, its usage
 * counter is 0 and it is either ignoring children or has no active children),
 * 0 is returned.
 *
 * -EINVAL is returned if runtime PM is disabled for the device, in which case
 * also the usage counter of @dev is not updated.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	return pm_runtime_get_conditional(dev, false);
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
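
/*
 * Example (editor's illustration, not part of this file): the conditional
 * "get" helpers are useful in paths that must not wake a suspended device,
 * such as spurious-interrupt handling.  The "foo" names are hypothetical.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_dev *foo = data;
 *
 *		if (pm_runtime_get_if_in_use(foo->dev) <= 0)
 *			return IRQ_NONE;	// device not in use, nothing to do
 *
 *		foo_handle_event(foo);
 *		pm_runtime_put_autosuspend(foo->dev);
 *		return IRQ_HANDLED;
 *	}
 */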

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	bool notify_parent = false;
	unsigned long flags;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/*
	 * Prevent PM-runtime from being enabled for the device or return an
	 * error if it is enabled already and working.
	 */
	if (dev->power.runtime_error || dev->power.disable_depth)
		dev->power.disable_depth++;
	else
		error = -EAGAIN;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (error)
		return error;

	/*
	 * If the new status is RPM_ACTIVE, the suppliers can be activated
	 * upfront regardless of the current status, because next time
	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
	 * involved will be dropped down to one anyway.
	 */
	if (status == RPM_ACTIVE) {
		int idx = device_links_read_lock();

		error = rpm_get_suppliers(dev);
		if (error)
			status = RPM_SUSPENDED;

		device_links_read_unlock(idx);
	}

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth &&
		    !parent->power.ignore_children &&
		    parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error) {
			status = RPM_SUSPENDED;
			goto out;
		}
	}

out_set:
	__update_runtime_status(dev, status);
	if (!error)
		dev->power.runtime_error = 0;

out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	if (status == RPM_SUSPENDED) {
		int idx = device_links_read_lock();

		rpm_put_suppliers(dev);

		device_links_read_unlock(idx);
	}

	pm_runtime_enable(dev);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
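
/*
 * Example (editor's illustration, not part of this file): a driver whose
 * hardware comes up powered typically seeds the runtime PM status during
 * probe before enabling runtime PM:
 *
 *	pm_runtime_set_active(dev);	// wraps __pm_runtime_set_status()
 *	pm_runtime_enable(dev);
 *
 * Without the pm_runtime_set_active() call, the core would assume the
 * device starts out RPM_SUSPENDED (see pm_runtime_init() below).
 */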

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING ||
	    dev->power.runtime_status == RPM_RESUMING ||
	    dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

bool pm_runtime_block_if_disabled(struct device *dev)
{
	bool ret;

	spin_lock_irq(&dev->power.lock);

	ret = !pm_runtime_enabled(dev);
	if (ret && dev->power.last_status == RPM_INVALID)
		dev->power.last_status = RPM_BLOCKED;

	spin_unlock_irq(&dev->power.lock);

	return ret;
}

void pm_runtime_unblock(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.last_status == RPM_BLOCKED)
		dev->power.last_status = RPM_INVALID;

	spin_unlock_irq(&dev->power.lock);
}

void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending &&
	    dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++) {
		__pm_runtime_barrier(dev);
		dev->power.last_status = dev->power.runtime_status;
	}

out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.disable_depth) {
		dev_warn(dev, "Unbalanced %s!\n", __func__);
		goto out;
	}

	if (--dev->power.disable_depth > 0)
		goto out;

	if (dev->power.last_status == RPM_BLOCKED) {
		dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
		dump_stack();
	}
	dev->power.last_status = RPM_INVALID;
	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();

	if (dev->power.runtime_status == RPM_SUSPENDED &&
	    !dev->power.ignore_children &&
	    atomic_read(&dev->power.child_count) > 0)
		dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");

out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

static void pm_runtime_set_suspended_action(void *data)
{
	pm_runtime_set_suspended(data);
}

/**
 * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
 *
 * @dev: Device to handle.
 */
int devm_pm_runtime_set_active_enabled(struct device *dev)
{
	int err;

	err = pm_runtime_set_active(dev);
	if (err)
		return err;

	err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
	if (err)
		return err;

	return devm_pm_runtime_enable(dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);

static void pm_runtime_disable_action(void *data)
{
	pm_runtime_dont_use_autosuspend(data);
	pm_runtime_disable(data);
}

/**
 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
 *
 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
 * you at driver exit time if needed.
 *
 * @dev: Device to handle.
 */
int devm_pm_runtime_enable(struct device *dev)
{
	pm_runtime_enable(dev);

	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
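
/*
 * Example (editor's illustration, not part of this file): with the devres
 * variants above, a probe routine does not need matching cleanup calls in
 * its error paths or in ->remove().  The "foo" names are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = devm_pm_runtime_enable(&pdev->dev);
 *		if (ret)
 *			return ret;
 *		// pm_runtime_disable() runs automatically on driver detach
 *		...
 *	}
 */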

static void pm_runtime_put_noidle_action(void *data)
{
	pm_runtime_put_noidle(data);
}

/**
 * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
 *
 * @dev: Device to handle.
 */
int devm_pm_runtime_get_noresume(struct device *dev)
{
	pm_runtime_get_noresume(dev);

	return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	int ret;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	ret = rpm_drop_usage_count(dev);
	if (ret == 0)
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
	else if (ret > 0)
		trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);

out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
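
/*
 * Example (editor's illustration, not part of this file): a driver whose
 * runtime PM callbacks must run with interrupts disabled opts in once,
 * usually at probe time:
 *
 *	pm_runtime_irq_safe(dev);
 *
 * After this, pm_runtime_get_sync(dev) and pm_runtime_put_sync(dev) may be
 * called from atomic context, at the cost of keeping the parent device
 * permanently runtime-resumed.
 */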

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		} else {
			trace_rpm_usage(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
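
/*
 * Example (editor's illustration, not part of this file): the usual
 * probe-time autosuspend setup combines the two setters above via the
 * inline helpers from <linux/pm_runtime.h>:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// 2 s of idleness
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * From then on, pm_runtime_put_autosuspend() schedules a suspend roughly
 * 2 seconds after the last pm_runtime_mark_last_busy() call.
 */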

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.last_status = RPM_INVALID;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.needs_force_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_ABS);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
	/*
	 * Clear power.needs_force_resume in case it has been set by
	 * pm_runtime_force_suspend() invoked from a driver remove callback.
	 */
	dev->power.needs_force_resume = false;
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
			link->supplier_preactivated = true;
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * PM-runtime references to it from the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
	if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
		return;

	pm_runtime_drop_link_count(link->consumer);
	pm_runtime_release_supplier(link);
	pm_request_idle(link->supplier);
}

static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
{
	/*
	 * Setting power.strict_midlayer means that the middle layer
	 * code does not want its runtime PM callbacks to be invoked via
	 * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
	 * return a direct pointer to the driver callback in that case.
	 */
	if (dev_pm_strict_midlayer_is_set(dev))
		return __rpm_get_driver_callback(dev, cb_offset);

	return __rpm_get_callback(dev, cb_offset);
}

#define GET_CALLBACK(dev, callback) \
		get_callback(dev, offsetof(struct dev_pm_ops, callback))

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).  Keep runtime PM disabled to preserve the state
 * unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to make
 * sure the device is put into low power state and it should only be used during
 * system-wide PM transitions to sleep states.  It assumes that the analogous
 * pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
		return 0;

	callback = GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	dev_pm_enable_wake_irq_complete(dev);

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent and the usage counters of its suppliers.  Otherwise, set
	 * power.needs_force_resume to let pm_runtime_force_resume() know that
	 * the device needs to be taken care of and to prevent this function
	 * from handling the device again in case the device is passed to it
	 * once more subsequently.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		dev->power.needs_force_resume = true;

	return 0;

err:
	dev_pm_disable_wake_irq_check(dev, true);
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

#ifdef CONFIG_PM_SLEEP

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * This function expects that either pm_runtime_force_suspend() has put the
 * device into a low-power state prior to calling it, or the device had been
 * runtime-suspended before the preceding system-wide suspend transition and it
 * was left in suspend during that transition.
 *
 * The actions carried out by pm_runtime_force_suspend(), or by a runtime
 * suspend in general, are reversed and the device is brought back into full
 * power if it is expected to be used on system resume, which is the case when
 * its needs_force_resume flag is set or when its smart_suspend flag is set and
 * its runtime PM status is "active".
 *
 * In other cases, the resume is deferred to be managed via runtime PM.
 *
 * Typically, this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
	    pm_runtime_status_suspended(dev)))
		goto out;

	callback = GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		dev_pm_enable_wake_irq_check(dev, false);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);

out:
	/*
	 * The smart_suspend flag can be cleared here because it is not going
	 * to be necessary until the next system-wide suspend transition that
	 * will update it again.
	 */
	dev->power.smart_suspend = false;
	/*
	 * Also clear needs_force_resume to make this function skip devices
	 * that have been seen by it once.
	 */
	dev->power.needs_force_resume = false;

	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);

bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

#endif /* CONFIG_PM_SLEEP */
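
/*
 * Example (editor's illustration, not part of this file): drivers that can
 * reuse their runtime PM callbacks for system sleep plug the two helpers
 * above directly into their dev_pm_ops.  The "foo" names are hypothetical.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *				    pm_runtime_force_resume)
 *		RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 */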