/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <[email protected]>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <[email protected]>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
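/*
 * A worked example of the arithmetic above, assuming HZ = 1000 and an
 * autosuspend_delay of 2000 ms: with power.last_busy = 12300, the raw
 * expiration time is 12300 + msecs_to_jiffies(2000) = 14300.  Because the
 * delay is at least one second, round_jiffies() then moves that value to
 * (approximately) a whole-second boundary, so that the suspend timers of
 * many devices can fire in the same wakeup.  The "expires += !expires"
 * statement only guards against a computed value of exactly 0, since 0 is
 * the return value reserved for "no expiration pending".
 */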

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->pwr_domain)
		callback = dev->pwr_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (callback) {
		spin_unlock_irq(&dev->power.lock);

		callback(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.irq_safe) {
		retval = cb(dev);
	} else {
		spin_unlock_irq(&dev->power.lock);

		retval = cb(dev);

		spin_lock_irq(&dev->power.lock);
	}
	dev->power.runtime_error = retval;
	return retval;
}

/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->pwr_domain)
		callback = dev->pwr_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = 0;
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}

/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->pwr_domain)
		callback = dev->pwr_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
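/*
 * Minimal usage sketch (the "foo" driver below is hypothetical, only meant
 * to illustrate the intended calling pattern): request an asynchronous
 * suspend half a second after I/O completes, instead of suspending
 * synchronously from a possibly atomic completion path.
 *
 *	static void foo_io_complete(struct foo_device *foo)
 *	{
 *		pm_schedule_suspend(foo->dev, 500);
 *	}
 *
 * If the device becomes busy again before the timer fires, a subsequent
 * resume (pm_runtime_get_sync(), for example) deactivates the timer and
 * clears the pending request, as rpm_resume() above does.
 */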

/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
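/*
 * For reference, the inline helpers in <linux/pm_runtime.h> are thin wrappers
 * around the three entry points above; in this kernel version the mapping is
 * approximately:
 *
 *	pm_runtime_get_sync(dev)	-> __pm_runtime_resume(dev, RPM_GET_PUT)
 *	pm_runtime_put(dev)		-> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev)	-> __pm_runtime_idle(dev, RPM_GET_PUT)
 *	pm_runtime_put_autosuspend(dev)	-> __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 *	pm_runtime_suspend(dev)		-> __pm_runtime_suspend(dev, 0)
 *	pm_request_resume(dev)		-> __pm_runtime_resume(dev, RPM_ASYNC)
 *
 * so the RPM_GET_PUT, RPM_ASYNC and RPM_AUTO flags select the usage-count
 * handling, the synchronous vs. queued behaviour and the autosuspend-delay
 * handling described in the comments above.
 */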

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
 * changes the other way, allow run-time suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent run-time
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
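/*
 * How the autosuspend helpers above are typically combined, as a minimal
 * sketch (the "foo" names are hypothetical and only for illustration).  In
 * the probe path:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *
 * and around each burst of I/O:
 *
 *	pm_runtime_get_sync(dev);
 *	foo_do_io(dev);
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * pm_runtime_use_autosuspend(), pm_runtime_set_active() and
 * pm_runtime_mark_last_busy() are inline wrappers from <linux/pm_runtime.h>
 * around __pm_runtime_use_autosuspend(), __pm_runtime_set_status() and the
 * power.last_busy field consumed by pm_runtime_autosuspend_expiration() above.
 */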

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}