// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <[email protected]>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <[email protected]>
 */
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/rculist.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
{
	return *(pm_callback_t *)(start + offset);
}

static pm_callback_t __rpm_get_driver_callback(struct device *dev,
					       size_t cb_offset)
{
	if (dev->driver && dev->driver->pm)
		return get_callback_ptr(dev->driver->pm, cb_offset);

	return NULL;
}

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	const struct dev_pm_ops *ops;
	pm_callback_t cb = NULL;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = get_callback_ptr(ops, cb_offset);

	if (!cb)
		cb = __rpm_get_driver_callback(dev, cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	trace_rpm_status(dev, status);
	dev->power.runtime_status = status;
}
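
/*
 * For illustration (not part of the original file): the RPM_GET_CALLBACK()
 * macro above resolves a callback as a byte offset into whichever
 * dev_pm_ops structure takes precedence (PM domain first, then type,
 * class, bus, and finally the driver itself).  For example:
 *
 *	pm_callback_t cb = RPM_GET_CALLBACK(dev, runtime_suspend);
 *
 * expands to
 *
 *	__rpm_get_callback(dev, offsetof(struct dev_pm_ops, runtime_suspend));
 */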

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = suspended ? dev->power.suspended_time : dev->power.active_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}
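
/*
 * Worked example of the expiration arithmetic above (illustrative only):
 * with power.autosuspend_delay == 2000 (ms) and power.last_busy == T (ns),
 * the device becomes suspendable at T + 2000 * NSEC_PER_MSEC.  A caller
 * sampling ktime_get_mono_fast_ns() before that instant gets the nonzero
 * expiration time back; at or after it, 0 is returned and the suspend may
 * proceed immediately.
 */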

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * This function should only be called by block device or network device
 * drivers to solve the deadlock problem during runtime resume/suspend:
 *
 *     If a memory allocation with GFP_KERNEL is made inside the runtime
 *     resume/suspend callback of any one of the device's ancestors (or of
 *     the block device itself), a deadlock may be triggered inside the
 *     memory allocation, since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.  This
 *     situation was first pointed out by Alan Stern.  Network devices are
 *     involved in iSCSI-like situations.
 *
 * dev_hotplug_mutex is held in this function to handle the hotplug race,
 * because pm_runtime_set_memalloc_noio() may be called from an async
 * probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected (block/network) device.
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the device
		 * itself has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if none of its
		 * children set the flag, because an ancestor's flag may
		 * have been set by any one of its descendants.
		 */
		if (!dev || (!enable &&
		    device_for_each_child(dev, NULL, dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count))
		retval = -EAGAIN;
	else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume &&
	    dev->power.runtime_status == RPM_SUSPENDING) ||
	    (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_resume_latency(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}
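
/*
 * Illustrative usage sketch (hypothetical "foo" block driver, not part of
 * the original file): the flag is turned on once the device is registered
 * and cleared again before it goes away, per the rules above.
 *
 *	static int foo_add_disk(struct foo *foo)
 *	{
 *		int ret = device_add(&foo->dev);
 *
 *		if (ret)
 *			return ret;
 *
 *		pm_runtime_set_memalloc_noio(&foo->dev, true);
 *		return 0;
 *	}
 *
 *	static void foo_remove_disk(struct foo *foo)
 *	{
 *		pm_runtime_set_memalloc_noio(&foo->dev, false);
 *		device_del(&foo->dev);
 *	}
 */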

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		int retval;

		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

/**
 * pm_runtime_release_supplier - Drop references to device link's supplier.
 * @link: Target device link.
 *
 * Drop all runtime PM references associated with @link to its supplier device.
 */
void pm_runtime_release_supplier(struct device_link *link)
{
	struct device *supplier = link->supplier;

	/*
	 * The additional power.usage_count check is a safety net in case
	 * the rpm_active refcount becomes saturated, in which case
	 * refcount_dec_not_one() would return true forever, but it is not
	 * strictly necessary.
	 */
	while (refcount_dec_not_one(&link->rpm_active) &&
	       atomic_read(&supplier->power.usage_count) > 0)
		pm_runtime_put_noidle(supplier);
}

static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held()) {
		pm_runtime_release_supplier(link);
		if (try_to_suspend)
			pm_request_idle(link->supplier);
	}
}

static void rpm_put_suppliers(struct device *dev)
{
	__rpm_put_suppliers(dev, true);
}

static void rpm_suspend_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		pm_request_idle(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval) {
				rpm_put_suppliers(dev);
				goto fail;
			}

			device_links_read_unlock(idx);
		}
	}

	if (cb)
		retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links &&
		    ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
		    (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

			__rpm_put_suppliers(dev, false);

fail:
			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}
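
/*
 * The supplier handling above is driven by device links created with
 * DL_FLAG_PM_RUNTIME.  A minimal consumer-side sketch (hypothetical
 * devices, not part of the original file):
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer_dev, supplier_dev,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
 *	if (!link)
 *		return -ENODEV;
 *
 * With such a link in place, resuming the consumer makes __rpm_callback()
 * bump the supplier via rpm_get_suppliers() before the consumer's
 * ->runtime_resume() callback runs.
 */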

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might occur if a memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend or
		 * runtime_resume callbacks of a block device's ancestor
		 * or of the block device itself.  A network device may
		 * be regarded as part of an iSCSI block device, so the
		 * network device and its ancestors should be marked as
		 * memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	/*
	 * Since -EACCES means that runtime PM is disabled for the given device,
	 * it should not be returned by runtime PM callbacks.  If it is returned
	 * nevertheless, assume it to be a transient error and convert it to
	 * -EAGAIN.
	 */
	if (retval == -EACCES)
		retval = -EAGAIN;

	if (retval != -EAGAIN && retval != -EBUSY)
		dev->power.runtime_error = retval;

	return retval;
}
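
/*
 * Error-code semantics for driver callbacks, per the conversion above; a
 * sketch of a hypothetical ->runtime_suspend() implementation (not part
 * of the original file):
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (foo_hw_busy(foo))
 *			return -EBUSY;
 *
 *		foo_hw_power_down(foo);
 *		return 0;
 *	}
 *
 * -EBUSY and -EAGAIN are treated as transient and do not latch
 * power.runtime_error, so the suspend can simply be retried later; any
 * other negative value latches the error until the status is reset.
 */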

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	else if ((rpmflags & RPM_GET_PUT) && retval == 1)
		;	/* put() is allowed in RPM_SUSPENDED */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;

	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	/* If no callback assume success. */
	if (!callback || dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = callback(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
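
/*
 * A hypothetical ->runtime_idle() callback cooperating with the logic
 * above (illustrative only): returning 0 lets rpm_idle() proceed to
 * rpm_suspend(dev, RPM_AUTO), while a negative value vetoes the suspend.
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (foo_hw_queue_empty(foo))
 *			return 0;
 *
 *		pm_runtime_mark_last_busy(dev);
 *		return -EBUSY;
 *	}
 */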

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry it out; otherwise, send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are unset).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		goto out;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;

	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
			    dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						(NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						       ns_to_ktime(expires),
						       slack,
						       HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

	dev_pm_enable_wake_irq_complete(dev);

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (dev->power.irq_safe)
		goto out;

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}
	/* Maybe the suppliers are now able to suspend. */
	if (dev->power.links_count > 0) {
		spin_unlock_irq(&dev->power.lock);

		rpm_suspend_suppliers(dev);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev, true);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	/*
	 * On transient errors, if the callback routine failed an autosuspend,
	 * and if the last_busy time has been updated so that there is a new
	 * autosuspend expiration time, automatically reschedule another
	 * autosuspend.
	 */
	if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
	    pm_runtime_autosuspend_expiration(dev) != 0)
		goto repeat;

	pm_runtime_cancel_pending(dev);

	goto out;
}
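
/*
 * The RPM_AUTO path above is what a driver exercises with the common
 * autosuspend idiom in its I/O completion path (hypothetical names,
 * illustrative only):
 *
 *	static void foo_io_done(struct foo *foo)
 *	{
 *		pm_runtime_mark_last_busy(foo->dev);
 *		pm_runtime_put_autosuspend(foo->dev);
 *	}
 *
 * The put drops the usage count and, via RPM_AUTO, lands in the
 * rescheduling branch above until the autosuspend delay has expired.
 */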

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
	} else if (dev->power.disable_depth > 0) {
		if (dev->power.runtime_status == RPM_ACTIVE &&
		    dev->power.last_status == RPM_ACTIVE)
			retval = 1;
		else if (rpmflags & RPM_TRANSPARENT)
			goto out;
		else
			retval = -EACCES;
	}
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING ||
	    dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING) {
				dev->power.deferred_resume = true;
				if (rpmflags & RPM_NOWAIT)
					retval = -EINPROGRESS;
			} else {
				retval = -EINPROGRESS;
			}
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING &&
			    dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0 ||
		    dev->parent->power.ignore_children ||
		    dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;

		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth &&
		    !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;

		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
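
/*
 * Typical synchronous consumer of the resume path above (hypothetical
 * names, illustrative only): take a usage reference and resume before
 * touching the hardware, drop it afterwards.
 *
 *	static int foo_xfer(struct foo *foo)
 *	{
 *		int ret = pm_runtime_get_sync(foo->dev);
 *
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(foo->dev);
 *			return ret;
 *		}
 *
 *		ret = foo_hw_xfer(foo);
 *
 *		pm_runtime_mark_last_busy(foo->dev);
 *		pm_runtime_put(foo->dev);
 *		return ret;
 *	}
 *
 * pm_runtime_resume_and_get() can replace the get/put_noidle pair on
 * kernels that provide it.
 */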

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early.
	 */
	if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in the
 * future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

static int rpm_drop_usage_count(struct device *dev)
{
	int ret;

	ret = atomic_sub_return(1, &dev->power.usage_count);
	if (ret >= 0)
		return ret;

	/*
	 * Because rpm_resume() does not check the usage counter, it will resume
	 * the device even if the usage counter is 0 or negative, so it is
	 * sufficient to increment the usage counter here to reverse the change
	 * made above.
	 */
	atomic_inc(&dev->power.usage_count);
	dev_warn(dev, "Runtime PM usage count underflow!\n");
	return -EINVAL;
}

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
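
/*
 * For reference, the public helpers in include/linux/pm_runtime.h funnel
 * into the three entry points in this file roughly as follows (flag
 * combinations as of this writing; see the header for the authoritative
 * definitions):
 *
 *	pm_runtime_idle(dev)        -> __pm_runtime_idle(dev, 0)
 *	pm_runtime_put(dev)         -> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev)    -> __pm_runtime_idle(dev, RPM_GET_PUT)
 *	pm_runtime_suspend(dev)     -> __pm_runtime_suspend(dev, 0)
 *	pm_runtime_autosuspend(dev) -> __pm_runtime_suspend(dev, RPM_AUTO)
 *	pm_runtime_resume(dev)      -> __pm_runtime_resume(dev, 0)
 *	pm_runtime_get(dev)         -> __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_get_sync(dev)    -> __pm_runtime_resume(dev, RPM_GET_PUT)
 */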

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		retval = rpm_drop_usage_count(dev);
		if (retval < 0) {
			return retval;
		} else if (retval > 0) {
			trace_rpm_usage(dev, rpmflags);
			return 0;
		}
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
		       dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

/**
 * pm_runtime_get_conditional - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
 * is set, or (2) @dev is not ignoring children and its active child count is
 * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
 * the usage counter of @dev and return 1.
 *
 * Otherwise, return 0 without changing the usage counter.
 *
 * If @ign_usage_count is %true, this function can be used to prevent suspending
 * the device when its runtime PM status is %RPM_ACTIVE.
 *
 * If @ign_usage_count is %false, this function can be used to prevent
 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
 * runtime PM usage counter is not zero.
 *
 * The caller is responsible for decrementing the runtime PM usage counter of
 * @dev after this function has returned a positive value for it.
 */
static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev->power.disable_depth > 0) {
		retval = -EINVAL;
	} else if (dev->power.runtime_status != RPM_ACTIVE) {
		retval = 0;
	} else if (ign_usage_count || (!dev->power.ignore_children &&
		   atomic_read(&dev->power.child_count) > 0)) {
		retval = 1;
		atomic_inc(&dev->power.usage_count);
	} else {
		retval = atomic_inc_not_zero(&dev->power.usage_count);
	}
	trace_rpm_usage(dev, 0);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}

/**
 * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
 *			      in active state
 * @dev: Target device.
 *
 * Increment the runtime PM usage counter of @dev if its runtime PM status is
 * %RPM_ACTIVE, in which case 1 is returned.  If the device is in a different
 * state, 0 is returned.  -EINVAL is returned if runtime PM is disabled for the
 * device, in which case the usage counter is also left unmodified.
 */
int pm_runtime_get_if_active(struct device *dev)
{
	return pm_runtime_get_conditional(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);

/**
 * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
 * @dev: Target device.
 *
 * Increment the runtime PM usage counter of @dev if its runtime PM status is
 * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
 * ignoring children and its active child count is nonzero.  1 is returned in
 * this case.
 *
 * If @dev is in a different state or it is not in use (that is, its usage
 * counter is 0, or it is ignoring children, or its active child count is 0),
 * 0 is returned.
 *
 * -EINVAL is returned if runtime PM is disabled for the device, in which case
 * the usage counter of @dev is also not updated.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	return pm_runtime_get_conditional(dev, false);
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
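
/*
 * A common consumer of pm_runtime_get_if_in_use() is an interrupt handler
 * that must not touch a suspended device (hypothetical names, illustrative
 * only):
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo *foo = data;
 *
 *		if (pm_runtime_get_if_in_use(foo->dev) <= 0)
 *			return IRQ_NONE;
 *
 *		foo_handle_event(foo);
 *
 *		pm_runtime_mark_last_busy(foo->dev);
 *		pm_runtime_put_autosuspend(foo->dev);
 *		return IRQ_HANDLED;
 *	}
 *
 * The put variant is asynchronous (RPM_ASYNC), so it is safe in atomic
 * context.
 */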

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	bool notify_parent = false;
	unsigned long flags;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/*
	 * Prevent PM-runtime from being enabled for the device or return an
	 * error if it is enabled already and working.
	 */
	if (dev->power.runtime_error || dev->power.disable_depth)
		dev->power.disable_depth++;
	else
		error = -EAGAIN;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (error)
		return error;

	/*
	 * If the new status is RPM_ACTIVE, the suppliers can be activated
	 * upfront regardless of the current status, because next time
	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
	 * involved will be dropped down to one anyway.
	 */
	if (status == RPM_ACTIVE) {
		int idx = device_links_read_lock();

		error = rpm_get_suppliers(dev);
		if (error)
			status = RPM_SUSPENDED;

		device_links_read_unlock(idx);
	}

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth &&
		    !parent->power.ignore_children &&
		    parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error) {
			status = RPM_SUSPENDED;
			goto out;
		}
	}

 out_set:
	__update_runtime_status(dev, status);
	if (!error)
		dev->power.runtime_error = 0;

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	if (status == RPM_SUSPENDED) {
		int idx = device_links_read_lock();

		rpm_put_suppliers(dev);

		device_links_read_unlock(idx);
	}

	pm_runtime_enable(dev);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
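
/*
 * __pm_runtime_set_status() backs pm_runtime_set_active() and
 * pm_runtime_set_suspended().  A canonical probe-time sequence for
 * hardware that comes up powered (hypothetical driver, illustrative only):
 *
 *	foo_hw_power_on(foo);
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * pm_runtime_set_active() fails with -EBUSY if the parent is inactive,
 * runtime-PM-enabled and not ignoring its children, per the rules above.
 */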

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING ||
	    dev->power.runtime_status == RPM_RESUMING ||
	    dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING &&
			    dev->power.runtime_status != RPM_RESUMING &&
			    !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 */
void pm_runtime_barrier(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending &&
	    dev->power.request == RPM_REQ_RESUME)
		rpm_resume(dev, 0);

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

bool pm_runtime_block_if_disabled(struct device *dev)
{
	bool ret;

	spin_lock_irq(&dev->power.lock);

	ret = !pm_runtime_enabled(dev);
	if (ret && dev->power.last_status == RPM_INVALID)
		dev->power.last_status = RPM_BLOCKED;

	spin_unlock_irq(&dev->power.lock);

	return ret;
}

void pm_runtime_unblock(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.last_status == RPM_BLOCKED)
		dev->power.last_status = RPM_INVALID;

	spin_unlock_irq(&dev->power.lock);
}

void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending &&
	    dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++) {
		__pm_runtime_barrier(dev);
		dev->power.last_status = dev->power.runtime_status;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.disable_depth) {
		dev_warn(dev, "Unbalanced %s!\n", __func__);
		goto out;
	}

	if (--dev->power.disable_depth > 0)
		goto out;

	if (dev->power.last_status == RPM_BLOCKED) {
		dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
		dump_stack();
	}
	dev->power.last_status = RPM_INVALID;
	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();

	if (dev->power.runtime_status == RPM_SUSPENDED &&
	    !dev->power.ignore_children &&
	    atomic_read(&dev->power.child_count) > 0)
		dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

static void pm_runtime_set_suspended_action(void *data)
{
	pm_runtime_set_suspended(data);
}

/**
 * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
 *
 * @dev: Device to handle.
 */
int devm_pm_runtime_set_active_enabled(struct device *dev)
{
	int err;

	err = pm_runtime_set_active(dev);
	if (err)
		return err;

	err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
	if (err)
		return err;

	return devm_pm_runtime_enable(dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);

static void pm_runtime_disable_action(void *data)
{
	pm_runtime_dont_use_autosuspend(data);
	pm_runtime_disable(data);
}

/**
 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
 *
 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
 * you at driver exit time if needed.
 *
 * @dev: Device to handle.
 */
int devm_pm_runtime_enable(struct device *dev)
{
	pm_runtime_enable(dev);

	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);

static void pm_runtime_put_noidle_action(void *data)
{
	pm_runtime_put_noidle(data);
}

/**
 * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
 *
 * @dev: Device to handle.
 */
int devm_pm_runtime_get_noresume(struct device *dev)
{
	pm_runtime_get_noresume(dev);

	return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
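
/*
 * With the devres variants above, a probe function can rely on automatic
 * cleanup at driver detach (hypothetical driver, illustrative only):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret = devm_pm_runtime_enable(&pdev->dev);
 *
 *		if (ret)
 *			return ret;
 *
 *		return foo_hw_init(&pdev->dev);
 *	}
 *
 * On unbind the devres action runs pm_runtime_disable() (and
 * pm_runtime_dont_use_autosuspend() if needed) without an explicit
 * remove-path call.
 */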

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Resume @dev if already suspended and block runtime suspend of @dev in such
 * a way that it can be unblocked via the /sys/devices/.../power/control
 * interface, or otherwise by calling pm_runtime_allow().
 *
 * Calling this function many times in a row has the same effect as calling it
 * once.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Unblock runtime suspend of @dev after it has been blocked by
 * pm_runtime_forbid() (for instance, if it has been blocked via the
 * /sys/devices/.../power/control interface), check if @dev can be
 * suspended and suspend it in that case.
 *
 * Calling this function many times in a row has the same effect as calling it
 * once.
 */
void pm_runtime_allow(struct device *dev)
{
	int ret;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	ret = rpm_drop_usage_count(dev);
	if (ret == 0)
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
	else if (ret > 0)
		trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		} else {
			trace_rpm_usage(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
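
/*
 * The two setters above are normally used together at probe time
 * (hypothetical values, illustrative only):
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * pm_runtime_use_autosuspend() is a thin wrapper around
 * __pm_runtime_use_autosuspend(dev, true).  Setting use_autosuspend while
 * the delay is negative keeps suspends prevented until a valid delay is
 * configured, per update_autosuspend() above.
 */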

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.last_status = RPM_INVALID;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.needs_force_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_ABS);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (pm_runtime_enabled(dev))
		return;

	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);

	if (dev->power.irq_safe) {
		spin_lock_irq(&dev->power.lock);
		dev->power.irq_safe = 0;
		spin_unlock_irq(&dev->power.lock);
		if (dev->parent)
			pm_runtime_put(dev->parent);
	}
	/*
	 * Clear power.needs_force_resume in case it has been set by
	 * pm_runtime_force_suspend() invoked from a driver remove callback.
	 */
	dev->power.needs_force_resume = false;
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	dev_for_each_link_to_supplier(link, dev)
		if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
			link->supplier_preactivated = true;
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * PM-runtime references to it from the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
	if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
		return;

	pm_runtime_drop_link_count(link->consumer);
	pm_runtime_release_supplier(link);
	pm_request_idle(link->supplier);
}

static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
{
	/*
	 * Setting power.strict_midlayer means that the middle layer
	 * code does not want its runtime PM callbacks to be invoked via
	 * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
	 * return a direct pointer to the driver callback in that case.
	 */
	if (dev_pm_strict_midlayer_is_set(dev))
		return __rpm_get_driver_callback(dev, cb_offset);

	return __rpm_get_callback(dev, cb_offset);
}

#define GET_CALLBACK(dev, callback) \
		get_callback(dev, offsetof(struct dev_pm_ops, callback))

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).  Keep runtime PM disabled to preserve the state
 * unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to make
 * sure the device is put into low power state and it should only be used during
 * system-wide PM transitions to sleep states.  It assumes that the analogous
 * pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
		return 0;

	callback = GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	dev_pm_enable_wake_irq_complete(dev);

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent and the usage counters of its suppliers.  Otherwise, set
	 * power.needs_force_resume to let pm_runtime_force_resume() know that
	 * the device needs to be taken care of and to prevent this function
	 * from handling the device again in case the device is passed to it
	 * once more subsequently.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		dev->power.needs_force_resume = true;

	return 0;

err:
	dev_pm_disable_wake_irq_check(dev, true);
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

#ifdef CONFIG_PM_SLEEP

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * This function expects that either pm_runtime_force_suspend() has put the
 * device into a low-power state prior to calling it, or the device had been
 * runtime-suspended before the preceding system-wide suspend transition and it
 * was left in suspend during that transition.
 *
 * The actions carried out by pm_runtime_force_suspend(), or by a runtime
 * suspend in general, are reversed and the device is brought back into full
 * power if it is expected to be used on system resume, which is the case when
 * its needs_force_resume flag is set or when its smart_suspend flag is set and
 * its runtime PM status is "active".
 *
 * In other cases, the resume is deferred to be managed via runtime PM.
 *
 * Typically, this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
	    pm_runtime_status_suspended(dev)))
		goto out;

	callback = GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		dev_pm_enable_wake_irq_check(dev, false);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);

out:
	/*
	 * The smart_suspend flag can be cleared here because it is not going
	 * to be necessary until the next system-wide suspend transition that
	 * will update it again.
	 */
	dev->power.smart_suspend = false;
	/*
	 * Also clear needs_force_resume to make this function skip devices that
	 * have been seen by it once.
	 */
	dev->power.needs_force_resume = false;

	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);

bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

#endif /* CONFIG_PM_SLEEP */
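
/*
 * Drivers whose system-sleep handling is identical to their runtime PM
 * handling typically wire the two helpers above into their dev_pm_ops,
 * e.g. (illustrative only, assuming the DEFINE_RUNTIME_DEV_PM_OPS macro
 * from include/linux/pm_runtime.h):
 *
 *	static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops,
 *					 foo_runtime_suspend,
 *					 foo_runtime_resume,
 *					 NULL);
 *
 * which uses pm_runtime_force_suspend() and pm_runtime_force_resume() as
 * the system suspend and resume callbacks.
 */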